ZTWHHH committed on
Commit
4e408a0
·
verified ·
1 Parent(s): 46ef1d2

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/B_A_S_E_.py +5 -0
  2. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/C_F_F__2.py +13 -0
  3. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/C_O_L_R_.py +157 -0
  4. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/C_P_A_L_.py +296 -0
  5. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/D__e_b_g.py +17 -0
  6. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/E_B_D_T_.py +827 -0
  7. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/E_B_L_C_.py +710 -0
  8. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/F_F_T_M_.py +42 -0
  9. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G_D_E_F_.py +5 -0
  10. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G_P_K_G_.py +126 -0
  11. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G_P_O_S_.py +5 -0
  12. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G__l_a_t.py +234 -0
  13. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/J_S_T_F_.py +5 -0
  14. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/S_I_N_G_.py +92 -0
  15. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/S_T_A_T_.py +5 -0
  16. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_B_.py +5 -0
  17. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_D_.py +5 -0
  18. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_P_.py +5 -0
  19. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__1.py +164 -0
  20. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__5.py +47 -0
  21. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/TupleVariation.py +846 -0
  22. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/V_A_R_C_.py +5 -0
  23. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/B_A_S_E_.cpython-310.pyc +0 -0
  24. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/BitmapGlyphMetrics.cpython-310.pyc +0 -0
  25. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/D__e_b_g.cpython-310.pyc +0 -0
  26. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/F__e_a_t.cpython-310.pyc +0 -0
  27. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G_P_K_G_.cpython-310.pyc +0 -0
  28. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G_P_O_S_.cpython-310.pyc +0 -0
  29. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G_S_U_B_.cpython-310.pyc +0 -0
  30. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G__l_a_t.cpython-310.pyc +0 -0
  31. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/J_S_T_F_.cpython-310.pyc +0 -0
  32. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/L_T_S_H_.cpython-310.pyc +0 -0
  33. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/M_V_A_R_.cpython-310.pyc +0 -0
  34. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_J_.cpython-310.pyc +0 -0
  35. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_S_.cpython-310.pyc +0 -0
  36. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__0.cpython-310.pyc +0 -0
  37. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__5.cpython-310.pyc +0 -0
  38. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_T_F_A_.cpython-310.pyc +0 -0
  39. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/TupleVariation.cpython-310.pyc +0 -0
  40. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_O_R_G_.cpython-310.pyc +0 -0
  41. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_V_A_R_.cpython-310.pyc +0 -0
  42. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_a_v_a_r.cpython-310.pyc +0 -0
  43. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_m_a_p.cpython-310.pyc +0 -0
  44. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_v_t.cpython-310.pyc +0 -0
  45. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_g_a_s_p.cpython-310.pyc +0 -0
  46. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_g_c_i_d.cpython-310.pyc +0 -0
  47. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_l_c_a_r.cpython-310.pyc +0 -0
  48. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_l_t_a_g.cpython-310.pyc +0 -0
  49. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_m_e_t_a.cpython-310.pyc +0 -0
  50. evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_m_o_r_t.cpython-310.pyc +0 -0
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/B_A_S_E_.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
from .otBase import BaseTTXConverter


class table_B_A_S_E_(BaseTTXConverter):
    """OpenType ``BASE`` (Baseline) table.

    All (de)compilation and XML round-tripping is handled by the generic
    otData-driven machinery inherited from ``BaseTTXConverter``; nothing
    table-specific is needed here.
    """
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/C_F_F__2.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from io import BytesIO
from fontTools.ttLib.tables.C_F_F_ import table_C_F_F_


class table_C_F_F__2(table_C_F_F_):
    """``CFF2`` table.

    Reuses the ``CFF `` table implementation wholesale; the only difference
    is that the underlying cff library is driven with ``isCFF2=True``.
    """

    def decompile(self, data, otFont):
        """Parse binary CFF2 data into ``self.cff``."""
        stream = BytesIO(data)
        self.cff.decompile(stream, otFont, isCFF2=True)
        assert len(self.cff) == 1, "can't deal with multi-font CFF tables."

    def compile(self, otFont):
        """Serialize ``self.cff`` back into binary CFF2 data."""
        buffer = BytesIO()
        self.cff.compile(buffer, otFont, isCFF2=True)
        return buffer.getvalue()
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/C_O_L_R_.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2013 Google, Inc. All Rights Reserved.
2
+ #
3
+ # Google Author(s): Behdad Esfahbod
4
+
5
+ from fontTools.misc.textTools import safeEval
6
+ from . import DefaultTable
7
+
8
+
9
class table_C_O_L_R_(DefaultTable.DefaultTable):
    """This table is structured so that you can treat it like a dictionary keyed by glyph name.

    ``ttFont['COLR'][<glyphName>]`` will return the color layers for any glyph.

    ``ttFont['COLR'][<glyphName>] = <value>`` will set the color layers for any glyph.
    """

    @staticmethod
    def _decompileColorLayersV0(table):
        # Flatten the decompiled otTables v0 structures into a plain
        # {baseGlyphName: [LayerRecord, ...]} dict (the legacy v0 API).
        if not table.LayerRecordArray:
            return {}
        colorLayerLists = {}
        layerRecords = table.LayerRecordArray.LayerRecord
        numLayerRecords = len(layerRecords)
        for baseRec in table.BaseGlyphRecordArray.BaseGlyphRecord:
            baseGlyph = baseRec.BaseGlyph
            firstLayerIndex = baseRec.FirstLayerIndex
            numLayers = baseRec.NumLayers
            assert firstLayerIndex + numLayers <= numLayerRecords
            layers = []
            for i in range(firstLayerIndex, firstLayerIndex + numLayers):
                layerRec = layerRecords[i]
                layers.append(LayerRecord(layerRec.LayerGlyph, layerRec.PaletteIndex))
            colorLayerLists[baseGlyph] = layers
        return colorLayerLists

    def _toOTTable(self, ttFont):
        # Build an otTables COLR (v0) object from the legacy ColorLayers dict.
        from . import otTables
        from fontTools.colorLib.builder import populateCOLRv0

        tableClass = getattr(otTables, self.tableTag)
        table = tableClass()
        table.Version = self.version

        populateCOLRv0(
            table,
            {
                baseGlyph: [(layer.name, layer.colorID) for layer in layers]
                for baseGlyph, layers in self.ColorLayers.items()
            },
            glyphMap=ttFont.getReverseGlyphMap(rebuild=True),
        )
        return table

    def decompile(self, data, ttFont):
        """Parse binary COLR data.

        Version 0 tables are adapted to the legacy ``ColorLayers`` dict API;
        newer versions keep the raw otTables object in ``self.table``.
        """
        from .otBase import OTTableReader
        from . import otTables

        # We use otData to decompile, but we adapt the decompiled otTables to the
        # existing COLR v0 API for backward compatibility.
        reader = OTTableReader(data, tableTag=self.tableTag)
        tableClass = getattr(otTables, self.tableTag)
        table = tableClass()
        table.decompile(reader, ttFont)

        self.version = table.Version
        if self.version == 0:
            self.ColorLayers = self._decompileColorLayersV0(table)
        else:
            # for new versions, keep the raw otTables around
            self.table = table

    def compile(self, ttFont):
        """Serialize back to binary, building an otTables object on the fly
        when only the legacy v0 ``ColorLayers`` representation exists."""
        from .otBase import OTTableWriter

        if hasattr(self, "table"):
            table = self.table
        else:
            table = self._toOTTable(ttFont)

        writer = OTTableWriter(tableTag=self.tableTag)
        table.compile(writer, ttFont)
        return writer.getAllData()

    def toXML(self, writer, ttFont):
        if hasattr(self, "table"):
            # v1+: delegate to the otTables XML serializer.
            self.table.toXML2(writer, ttFont)
        else:
            writer.simpletag("version", value=self.version)
            writer.newline()
            # Emit glyphs in glyph-ID order for stable output.
            for baseGlyph in sorted(self.ColorLayers.keys(), key=ttFont.getGlyphID):
                writer.begintag("ColorGlyph", name=baseGlyph)
                writer.newline()
                for layer in self.ColorLayers[baseGlyph]:
                    layer.toXML(writer, ttFont)
                writer.endtag("ColorGlyph")
                writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":  # old COLR v0 API
            setattr(self, name, safeEval(attrs["value"]))
        elif name == "ColorGlyph":
            if not hasattr(self, "ColorLayers"):
                self.ColorLayers = {}
            glyphName = attrs["name"]
            # (A redundant no-op pre-pass over `content` was removed here; it
            # iterated the elements without using them.)
            layers = []
            for element in content:
                if isinstance(element, str):
                    continue
                layer = LayerRecord()
                layer.fromXML(element[0], element[1], element[2], ttFont)
                layers.append(layer)
            self.ColorLayers[glyphName] = layers
        else:  # new COLR v1 API
            from . import otTables

            if not hasattr(self, "table"):
                tableClass = getattr(otTables, self.tableTag)
                self.table = tableClass()
            self.table.fromXML(name, attrs, content, ttFont)
            self.table.populateDefaults()
            self.version = self.table.Version

    def __getitem__(self, glyphName):
        if not isinstance(glyphName, str):
            raise TypeError(f"expected str, found {type(glyphName).__name__}")
        return self.ColorLayers[glyphName]

    def __setitem__(self, glyphName, value):
        if not isinstance(glyphName, str):
            raise TypeError(f"expected str, found {type(glyphName).__name__}")
        if value is not None:
            self.ColorLayers[glyphName] = value
        elif glyphName in self.ColorLayers:
            # Assigning None removes the entry, if present.
            del self.ColorLayers[glyphName]

    def __delitem__(self, glyphName):
        del self.ColorLayers[glyphName]
141
+
142
+
143
class LayerRecord(object):
    """A single COLR v0 color layer: a glyph name plus a palette color index."""

    def __init__(self, name=None, colorID=None):
        # name: the layer glyph's name; colorID: index into the CPAL palette.
        self.name = name
        self.colorID = colorID

    def toXML(self, writer, ttFont):
        writer.simpletag("layer", name=self.name, colorID=self.colorID)
        writer.newline()

    def fromXML(self, eltname, attrs, content, ttFont):
        # The "name" attribute stays a raw string; everything else (e.g.
        # colorID) is parsed as a Python literal.
        for attrName, attrValue in attrs.items():
            if attrName == "name":
                setattr(self, attrName, attrValue)
            else:
                setattr(self, attrName, safeEval(attrValue))
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/C_P_A_L_.py ADDED
@@ -0,0 +1,296 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2013 Google, Inc. All Rights Reserved.
2
+ #
3
+ # Google Author(s): Behdad Esfahbod
4
+
5
+ from fontTools.misc.textTools import bytesjoin, safeEval
6
+ from . import DefaultTable
7
+ import array
8
+ from collections import namedtuple
9
+ import struct
10
+ import sys
11
+
12
+
13
class table_C_P_A_L_(DefaultTable.DefaultTable):
    """``CPAL`` (Color Palette) table: one or more palettes of BGRA color
    records, plus (in version 1) optional palette types and name-table labels."""

    # Sentinel name ID meaning "no name table entry for this palette/entry".
    NO_NAME_ID = 0xFFFF
    DEFAULT_PALETTE_TYPE = 0

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.palettes = []
        self.paletteTypes = []
        self.paletteLabels = []
        self.paletteEntryLabels = []

    def decompile(self, data, ttFont):
        """Parse binary CPAL data (versions 0 and 1)."""
        (
            self.version,
            self.numPaletteEntries,
            numPalettes,
            numColorRecords,
            goffsetFirstColorRecord,
        ) = struct.unpack(">HHHHL", data[:12])
        assert (
            self.version <= 1
        ), "Version of CPAL table is higher than I know how to handle"
        self.palettes = []
        pos = 12
        for i in range(numPalettes):
            # Each palette is a start index into the shared color record array.
            startIndex = struct.unpack(">H", data[pos : pos + 2])[0]
            assert startIndex + self.numPaletteEntries <= numColorRecords
            pos += 2
            palette = []
            ppos = goffsetFirstColorRecord + startIndex * 4
            for j in range(self.numPaletteEntries):
                palette.append(Color(*struct.unpack(">BBBB", data[ppos : ppos + 4])))
                ppos += 4
            self.palettes.append(palette)
        if self.version == 0:
            # Version 0 has no extension arrays; zero offsets yield defaults below.
            offsetToPaletteTypeArray = 0
            offsetToPaletteLabelArray = 0
            offsetToPaletteEntryLabelArray = 0
        else:
            pos = 12 + numPalettes * 2
            (
                offsetToPaletteTypeArray,
                offsetToPaletteLabelArray,
                offsetToPaletteEntryLabelArray,
            ) = struct.unpack(">LLL", data[pos : pos + 12])
        self.paletteTypes = self._decompileUInt32Array(
            data,
            offsetToPaletteTypeArray,
            numPalettes,
            default=self.DEFAULT_PALETTE_TYPE,
        )
        self.paletteLabels = self._decompileUInt16Array(
            data, offsetToPaletteLabelArray, numPalettes, default=self.NO_NAME_ID
        )
        self.paletteEntryLabels = self._decompileUInt16Array(
            data,
            offsetToPaletteEntryLabelArray,
            self.numPaletteEntries,
            default=self.NO_NAME_ID,
        )

    def _decompileUInt16Array(self, data, offset, numElements, default=0):
        # A zero offset means "array absent": fill with the default value.
        if offset == 0:
            return [default] * numElements
        result = array.array("H", data[offset : offset + 2 * numElements])
        if sys.byteorder != "big":
            result.byteswap()
        assert len(result) == numElements, result
        return result.tolist()

    def _decompileUInt32Array(self, data, offset, numElements, default=0):
        # A zero offset means "array absent": fill with the default value.
        if offset == 0:
            return [default] * numElements
        result = array.array("I", data[offset : offset + 4 * numElements])
        if sys.byteorder != "big":
            result.byteswap()
        assert len(result) == numElements, result
        return result.tolist()

    def compile(self, ttFont):
        """Serialize back to binary CPAL data."""
        colorRecordIndices, colorRecords = self._compileColorRecords()
        paletteTypes = self._compilePaletteTypes()
        paletteLabels = self._compilePaletteLabels()
        paletteEntryLabels = self._compilePaletteEntryLabels()
        numColorRecords = len(colorRecords) // 4
        offsetToFirstColorRecord = 12 + len(colorRecordIndices)
        if self.version >= 1:
            # Version 1 inserts three 4-byte extension offsets after the indices.
            offsetToFirstColorRecord += 12
        header = struct.pack(
            ">HHHHL",
            self.version,
            self.numPaletteEntries,
            len(self.palettes),
            numColorRecords,
            offsetToFirstColorRecord,
        )
        if self.version == 0:
            dataList = [header, colorRecordIndices, colorRecords]
        else:
            # Lay out the optional arrays after the color records; a zero
            # offset marks an omitted array.
            pos = offsetToFirstColorRecord + len(colorRecords)
            if len(paletteTypes) == 0:
                offsetToPaletteTypeArray = 0
            else:
                offsetToPaletteTypeArray = pos
                pos += len(paletteTypes)
            if len(paletteLabels) == 0:
                offsetToPaletteLabelArray = 0
            else:
                offsetToPaletteLabelArray = pos
                pos += len(paletteLabels)
            if len(paletteEntryLabels) == 0:
                offsetToPaletteEntryLabelArray = 0
            else:
                offsetToPaletteEntryLabelArray = pos
                # Fixed: previously advanced by len(paletteLabels); harmless
                # only because `pos` is unused past this point.
                pos += len(paletteEntryLabels)
            header1 = struct.pack(
                ">LLL",
                offsetToPaletteTypeArray,
                offsetToPaletteLabelArray,
                offsetToPaletteEntryLabelArray,
            )
            dataList = [
                header,
                colorRecordIndices,
                header1,
                colorRecords,
                paletteTypes,
                paletteLabels,
                paletteEntryLabels,
            ]
        return bytesjoin(dataList)

    def _compilePalette(self, palette):
        # Colors are stored on disk in BGRA byte order.
        assert len(palette) == self.numPaletteEntries
        pack = lambda c: struct.pack(">BBBB", c.blue, c.green, c.red, c.alpha)
        return bytesjoin([pack(color) for color in palette])

    def _compileColorRecords(self):
        # Deduplicate identical palettes: they share one run of color records.
        colorRecords, colorRecordIndices, pool = [], [], {}
        for palette in self.palettes:
            packedPalette = self._compilePalette(palette)
            if packedPalette in pool:
                index = pool[packedPalette]
            else:
                index = len(colorRecords)
                colorRecords.append(packedPalette)
                pool[packedPalette] = index
            colorRecordIndices.append(struct.pack(">H", index * self.numPaletteEntries))
        return bytesjoin(colorRecordIndices), bytesjoin(colorRecords)

    def _compilePaletteTypes(self):
        # Omit the array entirely when all types are the default (0).
        if self.version == 0 or not any(self.paletteTypes):
            return b""
        assert len(self.paletteTypes) == len(self.palettes)
        result = bytesjoin([struct.pack(">I", ptype) for ptype in self.paletteTypes])
        assert len(result) == 4 * len(self.palettes)
        return result

    def _compilePaletteLabels(self):
        # Omit the array entirely when no palette has a name.
        if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteLabels):
            return b""
        assert len(self.paletteLabels) == len(self.palettes)
        result = bytesjoin([struct.pack(">H", label) for label in self.paletteLabels])
        assert len(result) == 2 * len(self.palettes)
        return result

    def _compilePaletteEntryLabels(self):
        # Omit the array entirely when no palette entry has a name.
        if self.version == 0 or all(
            l == self.NO_NAME_ID for l in self.paletteEntryLabels
        ):
            return b""
        assert len(self.paletteEntryLabels) == self.numPaletteEntries
        result = bytesjoin(
            [struct.pack(">H", label) for label in self.paletteEntryLabels]
        )
        assert len(result) == 2 * self.numPaletteEntries
        return result

    def toXML(self, writer, ttFont):
        numPalettes = len(self.palettes)
        paletteLabels = {i: nameID for (i, nameID) in enumerate(self.paletteLabels)}
        paletteTypes = {i: typ for (i, typ) in enumerate(self.paletteTypes)}
        writer.simpletag("version", value=self.version)
        writer.newline()
        writer.simpletag("numPaletteEntries", value=self.numPaletteEntries)
        writer.newline()
        for index, palette in enumerate(self.palettes):
            attrs = {"index": index}
            paletteType = paletteTypes.get(index, self.DEFAULT_PALETTE_TYPE)
            paletteLabel = paletteLabels.get(index, self.NO_NAME_ID)
            # Only emit non-default attributes, and only for version >= 1.
            if self.version > 0 and paletteLabel != self.NO_NAME_ID:
                attrs["label"] = paletteLabel
            if self.version > 0 and paletteType != self.DEFAULT_PALETTE_TYPE:
                attrs["type"] = paletteType
            writer.begintag("palette", **attrs)
            writer.newline()
            if (
                self.version > 0
                and paletteLabel != self.NO_NAME_ID
                and ttFont
                and "name" in ttFont
            ):
                # Emit the human-readable palette name as an XML comment.
                name = ttFont["name"].getDebugName(paletteLabel)
                if name is not None:
                    writer.comment(name)
                    writer.newline()
            assert len(palette) == self.numPaletteEntries
            for cindex, color in enumerate(palette):
                color.toXML(writer, ttFont, cindex)
            writer.endtag("palette")
            writer.newline()
        if self.version > 0 and not all(
            l == self.NO_NAME_ID for l in self.paletteEntryLabels
        ):
            writer.begintag("paletteEntryLabels")
            writer.newline()
            for index, label in enumerate(self.paletteEntryLabels):
                if label != self.NO_NAME_ID:
                    writer.simpletag("label", index=index, value=label)
                    if self.version > 0 and label and ttFont and "name" in ttFont:
                        name = ttFont["name"].getDebugName(label)
                        if name is not None:
                            writer.comment(name)
                    writer.newline()
            writer.endtag("paletteEntryLabels")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "palette":
            self.paletteLabels.append(int(attrs.get("label", self.NO_NAME_ID)))
            self.paletteTypes.append(int(attrs.get("type", self.DEFAULT_PALETTE_TYPE)))
            palette = []
            for element in content:
                if isinstance(element, str):
                    continue
                attrs = element[1]
                color = Color.fromHex(attrs["value"])
                palette.append(color)
            self.palettes.append(palette)
        elif name == "paletteEntryLabels":
            colorLabels = {}
            for element in content:
                if isinstance(element, str):
                    continue
                elementName, elementAttr, _ = element
                if elementName == "label":
                    labelIndex = safeEval(elementAttr["index"])
                    nameID = safeEval(elementAttr["value"])
                    colorLabels[labelIndex] = nameID
            # Entries without an explicit label fall back to "unnamed".
            self.paletteEntryLabels = [
                colorLabels.get(i, self.NO_NAME_ID)
                for i in range(self.numPaletteEntries)
            ]
        elif "value" in attrs:
            value = safeEval(attrs["value"])
            setattr(self, name, value)
            if name == "numPaletteEntries":
                self.paletteEntryLabels = [self.NO_NAME_ID] * self.numPaletteEntries
+
272
+
273
class Color(namedtuple("Color", "blue green red alpha")):
    """An RGBA color, stored field-wise in CPAL's on-disk BGRA order."""

    def hex(self):
        """Return the color as an ``#RRGGBBAA`` hex string."""
        return f"#{self.red:02X}{self.green:02X}{self.blue:02X}{self.alpha:02X}"

    def __repr__(self):
        return self.hex()

    def toXML(self, writer, ttFont, index=None):
        writer.simpletag("color", value=self.hex(), index=index)
        writer.newline()

    @classmethod
    def fromHex(cls, value):
        """Parse an ``RRGGBB[AA]`` hex string (optionally ``#``-prefixed);
        a missing alpha component defaults to fully opaque (0xFF)."""
        if value[0] == "#":
            value = value[1:]
        red, green, blue = (int(value[i : i + 2], 16) for i in (0, 2, 4))
        alpha = int(value[6:8], 16) if len(value) >= 8 else 0xFF
        return cls(red=red, green=green, blue=blue, alpha=alpha)

    @classmethod
    def fromRGBA(cls, red, green, blue, alpha):
        """Build a Color from individual channel values."""
        return cls(red=red, green=green, blue=blue, alpha=alpha)
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/D__e_b_g.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+
3
+ from . import DefaultTable
4
+
5
+
6
class table_D__e_b_g(DefaultTable.DefaultTable):
    """``Debg`` table: an arbitrary JSON payload (presumably debugging info
    emitted by font build tooling — confirm against producers), stored as
    UTF-8 encoded JSON in the binary table."""

    def decompile(self, data, ttFont):
        """Parse the binary payload (JSON text) into ``self.data``."""
        self.data = json.loads(data)

    def compile(self, ttFont):
        """Serialize ``self.data`` back to UTF-8 encoded JSON bytes."""
        return json.dumps(self.data).encode("utf-8")

    def toXML(self, writer, ttFont):
        writer.writecdata(json.dumps(self.data, indent=2))

    def fromXML(self, name, attrs, content, ttFont):
        # ``content`` is the XML parser's list of text chunks (and possibly
        # nested element tuples), not a single string; json.loads requires
        # str/bytes, so join the text pieces first. Passing the list raised
        # TypeError before.
        self.data = json.loads("".join(piece for piece in content if isinstance(piece, str)))
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/E_B_D_T_.py ADDED
@@ -0,0 +1,827 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fontTools.misc import sstruct
2
+ from fontTools.misc.textTools import (
3
+ bytechr,
4
+ byteord,
5
+ bytesjoin,
6
+ strjoin,
7
+ safeEval,
8
+ readHex,
9
+ hexStr,
10
+ deHexStr,
11
+ )
12
+ from .BitmapGlyphMetrics import (
13
+ BigGlyphMetrics,
14
+ bigGlyphMetricsFormat,
15
+ SmallGlyphMetrics,
16
+ smallGlyphMetricsFormat,
17
+ )
18
+ from . import DefaultTable
19
+ import itertools
20
+ import os
21
+ import struct
22
+ import logging
23
+
24
+
25
# Module-level logger for warnings emitted while handling EBDT data.
log = logging.getLogger(__name__)

# sstruct format for the EBDT table header: a single 16.16 fixed-point version.
ebdtTableVersionFormat = """
	> # big endian
	version: 16.16F
"""

# sstruct format for one component entry referencing another glyph with a
# signed x/y offset (presumably used by the composite bitmap glyph formats
# defined later in this file — confirm against the format classes).
ebdtComponentFormat = """
	> # big endian
	glyphCode: H
	xOffset: b
	yOffset: b
"""
38
+
39
+
40
class table_E_B_D_T_(DefaultTable.DefaultTable):
    """``EBDT`` (Embedded Bitmap Data) table.

    Holds the actual bitmap glyph images; the glyph locations and metrics are
    declared by the companion locator table (``EBLC`` by default), which is
    why both (de)compilation directions read the locator from ``ttFont``.
    """

    # Keep a reference to the name of the data locator table.
    locatorName = "EBLC"

    # This method can be overridden in subclasses to support new formats
    # without changing the other implementation. Also can be used as a
    # convenience method for converting a font file to an alternative format.
    def getImageFormatClass(self, imageFormat):
        # NOTE: ebdt_bitmap_classes is presumably a format-number -> class
        # mapping defined elsewhere in this module — confirm.
        return ebdt_bitmap_classes[imageFormat]

    def decompile(self, data, ttFont):
        """Parse binary bitmap data, strike by strike, using the locator
        table's index subtables to slice ``data``."""
        # Get the version but don't advance the slice.
        # Most of the lookup for this table is done relative
        # to the beginning so slice by the offsets provided
        # in the EBLC table.
        sstruct.unpack2(ebdtTableVersionFormat, data, self)

        # Keep a dict of glyphs that have been seen so they aren't remade.
        # This dict maps intervals of data to the BitmapGlyph.
        glyphDict = {}

        # Pull out the EBLC table and loop through glyphs.
        # A strike is a concept that spans both tables.
        # The actual bitmap data is stored in the EBDT.
        locator = ttFont[self.__class__.locatorName]
        self.strikeData = []
        for curStrike in locator.strikes:
            bitmapGlyphDict = {}
            self.strikeData.append(bitmapGlyphDict)
            for indexSubTable in curStrike.indexSubTables:
                dataIter = zip(indexSubTable.names, indexSubTable.locations)
                for curName, curLoc in dataIter:
                    # Don't create duplicate data entries for the same glyphs.
                    # Instead just use the structures that already exist if they exist.
                    if curLoc in glyphDict:
                        curGlyph = glyphDict[curLoc]
                    else:
                        curGlyphData = data[slice(*curLoc)]
                        imageFormatClass = self.getImageFormatClass(
                            indexSubTable.imageFormat
                        )
                        curGlyph = imageFormatClass(curGlyphData, ttFont)
                        glyphDict[curLoc] = curGlyph
                    bitmapGlyphDict[curName] = curGlyph

    def compile(self, ttFont):
        """Serialize all strikes back to bytes, writing the resulting data
        locations back into the locator table's index subtables."""
        dataList = []
        dataList.append(sstruct.pack(ebdtTableVersionFormat, self))
        dataSize = len(dataList[0])

        # Keep a dict of glyphs that have been seen so they aren't remade.
        # This dict maps the id of the BitmapGlyph to the interval
        # in the data.
        glyphDict = {}

        # Go through the bitmap glyph data. Just in case the data for a glyph
        # changed the size metrics should be recalculated. There are a variety
        # of formats and they get stored in the EBLC table. That is why
        # recalculation is deferred to the EblcIndexSubTable class and just
        # pass what is known about bitmap glyphs from this particular table.
        locator = ttFont[self.__class__.locatorName]
        for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
            for curIndexSubTable in curStrike.indexSubTables:
                dataLocations = []
                for curName in curIndexSubTable.names:
                    # Handle the data placement based on seeing the glyph or not.
                    # Just save a reference to the location if the glyph has already
                    # been saved in compile. This code assumes that glyphs will only
                    # be referenced multiple times from indexFormat5. By luck the
                    # code may still work when referencing poorly ordered fonts with
                    # duplicate references. If there is a font that is unlucky the
                    # respective compile methods for the indexSubTables will fail
                    # their assertions. All fonts seem to follow this assumption.
                    # More complicated packing may be needed if a counter-font exists.
                    glyph = curGlyphDict[curName]
                    objectId = id(glyph)
                    if objectId not in glyphDict:
                        data = glyph.compile(ttFont)
                        data = curIndexSubTable.padBitmapData(data)
                        startByte = dataSize
                        dataSize += len(data)
                        endByte = dataSize
                        dataList.append(data)
                        dataLoc = (startByte, endByte)
                        glyphDict[objectId] = dataLoc
                    else:
                        dataLoc = glyphDict[objectId]
                    dataLocations.append(dataLoc)
                # Just use the new data locations in the indexSubTable.
                # The respective compile implementations will take care
                # of any of the problems in the conversion that may arise.
                curIndexSubTable.locations = dataLocations

        return bytesjoin(dataList)

    def toXML(self, writer, ttFont):
        """Dump each strike's glyphs; some export formats need metrics and
        bit depth, which are copied onto the glyphs first."""
        # When exporting to XML if one of the data export formats
        # requires metrics then those metrics may be in the locator.
        # In this case populate the bitmaps with "export metrics".
        if ttFont.bitmapGlyphDataFormat in ("row", "bitwise"):
            locator = ttFont[self.__class__.locatorName]
            for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
                for curIndexSubTable in curStrike.indexSubTables:
                    for curName in curIndexSubTable.names:
                        glyph = curGlyphDict[curName]
                        # I'm not sure which metrics have priority here.
                        # For now if both metrics exist go with glyph metrics.
                        if hasattr(glyph, "metrics"):
                            glyph.exportMetrics = glyph.metrics
                        else:
                            glyph.exportMetrics = curIndexSubTable.metrics
                        glyph.exportBitDepth = curStrike.bitmapSizeTable.bitDepth

        writer.simpletag("header", [("version", self.version)])
        writer.newline()
        locator = ttFont[self.__class__.locatorName]
        for strikeIndex, bitmapGlyphDict in enumerate(self.strikeData):
            writer.begintag("strikedata", [("index", strikeIndex)])
            writer.newline()
            for curName, curBitmap in bitmapGlyphDict.items():
                curBitmap.toXML(strikeIndex, curName, writer, ttFont)
            writer.endtag("strikedata")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild ``self.strikeData`` from XML; strikes may appear in any
        index order, and duplicates are rejected."""
        if name == "header":
            self.version = safeEval(attrs["version"])
        elif name == "strikedata":
            if not hasattr(self, "strikeData"):
                self.strikeData = []
            strikeIndex = safeEval(attrs["index"])

            bitmapGlyphDict = {}
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                # Match child tags against the bitmap-format tag prefix
                # (skipping the shared leading characters of both names).
                if name[4:].startswith(_bitmapGlyphSubclassPrefix[4:]):
                    imageFormat = safeEval(name[len(_bitmapGlyphSubclassPrefix) :])
                    glyphName = attrs["name"]
                    imageFormatClass = self.getImageFormatClass(imageFormat)
                    curGlyph = imageFormatClass(None, None)
                    curGlyph.fromXML(name, attrs, content, ttFont)
                    assert glyphName not in bitmapGlyphDict, (
                        "Duplicate glyphs with the same name '%s' in the same strike."
                        % glyphName
                    )
                    bitmapGlyphDict[glyphName] = curGlyph
                else:
                    log.warning("%s being ignored by %s", name, self.__class__.__name__)

            # Grow the strike data array to the appropriate size. The XML
            # format allows the strike index value to be out of order.
            if strikeIndex >= len(self.strikeData):
                self.strikeData += [None] * (strikeIndex + 1 - len(self.strikeData))
            assert (
                self.strikeData[strikeIndex] is None
            ), "Duplicate strike EBDT indices."
            self.strikeData[strikeIndex] = bitmapGlyphDict
+
200
+
201
class EbdtComponent(object):
    """One component reference of a composite bitmap glyph (formats 8/9):
    a glyph name plus the remaining fields of ``ebdtComponentFormat``."""

    def toXML(self, writer, ttFont):
        writer.begintag("ebdtComponent", [("name", self.name)])
        writer.newline()
        # Skip the first field of the format (presumably the numeric glyph
        # code, which the "name" attribute stands in for — see formats 8/9).
        for componentName in sstruct.getformat(ebdtComponentFormat)[1][1:]:
            writer.simpletag(componentName, value=getattr(self, componentName))
            writer.newline()
        writer.endtag("ebdtComponent")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.name = attrs["name"]
        # Accept only tags that correspond to real format fields.
        componentNames = set(sstruct.getformat(ebdtComponentFormat)[1][1:])
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name in componentNames:
                vars(self)[name] = safeEval(attrs["value"])
            else:
                log.warning("unknown name '%s' being ignored by EbdtComponent.", name)
222
+
223
+
224
+ # Helper functions for dealing with binary.
225
+
226
+
227
def _data2binary(data, numBits):
    """Render *numBits* bits of *data* as a '0'/'1' string, LSB-first per byte."""
    bits = []
    remaining = numBits
    for rawByte in data:
        value = byteord(rawByte)
        take = remaining if remaining < 8 else 8
        for _ in range(take):
            bits.append("1" if value & 0x1 else "0")
            value >>= 1
        remaining -= take
    return strjoin(bits)
240
+
241
+
242
def _binary2data(binary):
    """Inverse of _data2binary: pack a '0'/'1' string into bytes, LSB-first."""
    chunks = []
    for start in range(0, len(binary), 8):
        bitString = binary[start : start + 8]
        value = 0
        for bit in reversed(bitString):
            value = (value << 1) | (1 if bit == "1" else 0)
        chunks.append(bytechr(value))
    return bytesjoin(chunks)
253
+
254
+
255
+ def _memoize(f):
256
+ class memodict(dict):
257
+ def __missing__(self, key):
258
+ ret = f(key)
259
+ if isinstance(key, int) or len(key) == 1:
260
+ self[key] = ret
261
+ return ret
262
+
263
+ return memodict().__getitem__
264
+
265
+
266
# 00100111 -> 11100100 per byte, not to be confused with little/big endian.
# Bitmap data per byte is in the order that binary is written on the page
# with the least significant bit as far right as possible. This is the
# opposite of what makes sense algorithmically and hence this function.
@_memoize
def _reverseBytes(data):
    r"""Mirror the bit order inside each byte of *data*.

    >>> bin(ord(_reverseBytes(0b00100111)))
    '0b11100100'
    >>> _reverseBytes(b'\x00\xf0')
    b'\x00\x0f'
    """
    # Multi-byte input: recurse per byte (each single byte hits the cache).
    if isinstance(data, bytes) and len(data) != 1:
        return bytesjoin(map(_reverseBytes, data))
    value = byteord(data)
    flipped = 0
    for _ in range(8):
        flipped = (flipped << 1) | (value & 1)
        value >>= 1
    return bytechr(flipped)
287
+
288
+
289
+ # This section of code is for reading and writing image data to/from XML.
290
+
291
+
292
def _writeRawImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    """Write the glyph's image data as an uninterpreted hex dump."""
    writer.begintag("rawimagedata")
    writer.newline()
    writer.dumphex(bitmapObject.imageData)
    writer.endtag("rawimagedata")
    writer.newline()
298
+
299
+
300
def _readRawImageData(bitmapObject, name, attrs, content, ttFont):
    """Inverse of _writeRawImageData: parse the hex dump back into bytes."""
    bitmapObject.imageData = readHex(content)
302
+
303
+
304
def _writeRowImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    """Write the bitmap as one hex-encoded <row> element per bitmap row.

    Consumes (deletes) the temporary ``exportMetrics``/``exportBitDepth``
    attributes attached to the glyph by the table's toXML().
    """
    metrics = bitmapObject.exportMetrics
    del bitmapObject.exportMetrics
    bitDepth = bitmapObject.exportBitDepth
    del bitmapObject.exportBitDepth

    writer.begintag(
        "rowimagedata", bitDepth=bitDepth, width=metrics.width, height=metrics.height
    )
    writer.newline()
    for curRow in range(metrics.height):
        rowData = bitmapObject.getRow(curRow, bitDepth=bitDepth, metrics=metrics)
        writer.simpletag("row", value=hexStr(rowData))
        writer.newline()
    writer.endtag("rowimagedata")
    writer.newline()
320
+
321
+
322
def _readRowImageData(bitmapObject, name, attrs, content, ttFont):
    """Inverse of _writeRowImageData: collect <row> hex values into the bitmap."""
    bitDepth = safeEval(attrs["bitDepth"])
    metrics = SmallGlyphMetrics()
    metrics.width = safeEval(attrs["width"])
    metrics.height = safeEval(attrs["height"])

    dataRows = []
    for element in content:
        if not isinstance(element, tuple):
            continue
        name, attr, content = element
        # Only <row> children carry image data; anything else is skipped.
        if name == "row":
            dataRows.append(deHexStr(attr["value"]))
    bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics)
337
+
338
+
339
def _writeBitwiseImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    """Write the bitmap as ASCII art, one <row> per bitmap row ('.'=0, '@'=1).

    Consumes the temporary ``exportMetrics``/``exportBitDepth`` attributes set
    by the table's toXML().  Rows are fetched at bitDepth=1, so every pixel is
    rendered as a single on/off character.
    """
    metrics = bitmapObject.exportMetrics
    del bitmapObject.exportMetrics
    bitDepth = bitmapObject.exportBitDepth
    del bitmapObject.exportBitDepth

    # A dict for mapping binary to more readable/artistic ASCII characters.
    binaryConv = {"0": ".", "1": "@"}

    writer.begintag(
        "bitwiseimagedata",
        bitDepth=bitDepth,
        width=metrics.width,
        height=metrics.height,
    )
    writer.newline()
    for curRow in range(metrics.height):
        rowData = bitmapObject.getRow(
            curRow, bitDepth=1, metrics=metrics, reverseBytes=True
        )
        rowData = _data2binary(rowData, metrics.width)
        # Make the output a readable ASCII art form.
        rowData = strjoin(map(binaryConv.get, rowData))
        writer.simpletag("row", value=rowData)
        writer.newline()
    writer.endtag("bitwiseimagedata")
    writer.newline()
366
+
367
+
368
def _readBitwiseImageData(bitmapObject, name, attrs, content, ttFont):
    """Inverse of _writeBitwiseImageData: parse ASCII-art rows back to bits."""
    bitDepth = safeEval(attrs["bitDepth"])
    metrics = SmallGlyphMetrics()
    metrics.width = safeEval(attrs["width"])
    metrics.height = safeEval(attrs["height"])

    # A dict for mapping from ASCII to binary. All characters are considered
    # a '1' except space, period and '0' which maps to '0'.
    binaryConv = {" ": "0", ".": "0", "0": "0"}

    dataRows = []
    for element in content:
        if not isinstance(element, tuple):
            continue
        name, attr, content = element
        if name == "row":
            # Pair each character with the default "1" so that starmap feeds
            # it as dict.get's second argument: unknown characters map to "1".
            mapParams = zip(attr["value"], itertools.repeat("1"))
            rowData = strjoin(itertools.starmap(binaryConv.get, mapParams))
            dataRows.append(_binary2data(rowData))

    bitmapObject.setRows(
        dataRows, bitDepth=bitDepth, metrics=metrics, reverseBytes=True
    )
391
+
392
+
393
def _writeExtFileImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    """Write the image data to an external file and reference it from XML.

    Files are laid out as <output-dir>/bitmaps/strike<N>/<glyphName><ext>,
    where <ext> comes from the bitmap class's ``fileExtension``.
    """
    try:
        folder = os.path.dirname(writer.file.name)
    except AttributeError:
        # fall back to current directory if output file's directory isn't found
        folder = "."
    folder = os.path.join(folder, "bitmaps", "strike%d" % strikeIndex)
    # exist_ok=True creates any missing intermediate directories in one call
    # and avoids the check-then-create race of an isdir()/makedirs() pair.
    os.makedirs(folder, exist_ok=True)

    fullPath = os.path.join(folder, glyphName + bitmapObject.fileExtension)
    writer.simpletag("extfileimagedata", value=fullPath)
    writer.newline()

    with open(fullPath, "wb") as file:
        file.write(bitmapObject.imageData)
413
+
414
+
415
def _readExtFileImageData(bitmapObject, name, attrs, content, ttFont):
    """Load the image data back from the external file path recorded in XML."""
    fullPath = attrs["value"]
    with open(fullPath, "rb") as file:
        bitmapObject.imageData = file.read()
419
+
420
+
421
+ # End of XML writing code.
422
+
423
# Important information about the naming scheme. Used for identifying formats
# in XML: glyph subclasses are named <prefix><formatNumber>.
_bitmapGlyphSubclassPrefix = "ebdt_bitmap_format_"
426
+
427
+
428
class BitmapGlyph(object):
    """Base class for all EBDT bitmap glyph formats.

    Construction is lazy: raw table bytes are kept on ``self.data`` and only
    parsed by the subclass's ``decompile()`` on the first access to a missing
    attribute (see ``__getattr__``).
    """

    # For the external file format. This can be changed in subclasses. This way
    # when the extfile option is turned on files have the form: glyphName.ext
    # The default is just a flat binary file with no meaning.
    fileExtension = ".bin"

    # Keep track of reading and writing of various forms.
    # Maps each XML export option to its (write, read) function pair.
    xmlDataFunctions = {
        "raw": (_writeRawImageData, _readRawImageData),
        "row": (_writeRowImageData, _readRowImageData),
        "bitwise": (_writeBitwiseImageData, _readBitwiseImageData),
        "extfile": (_writeExtFileImageData, _readExtFileImageData),
    }

    def __init__(self, data, ttFont):
        self.data = data
        self.ttFont = ttFont
        # TODO Currently non-lazy decompilation is untested here...
        # if not ttFont.lazy:
        # 	self.decompile()
        # 	del self.data

    def __getattr__(self, attr):
        # Allow lazy decompile: a miss on a regular attribute parses
        # self.data first.  Dunder names and "data" itself must raise
        # normally to avoid infinite recursion.
        if attr[:2] == "__":
            raise AttributeError(attr)
        if attr == "data":
            raise AttributeError(attr)
        self.decompile()
        del self.data
        return getattr(self, attr)

    def ensureDecompiled(self, recurse=False):
        # Force the lazy decompile; *recurse* is accepted for API
        # compatibility and unused here.
        if hasattr(self, "data"):
            self.decompile()
            del self.data

    # Not a fan of this but it is needed for safer safety checking.
    def getFormat(self):
        # The numeric format is encoded in the subclass name suffix.
        return safeEval(self.__class__.__name__[len(_bitmapGlyphSubclassPrefix) :])

    def toXML(self, strikeIndex, glyphName, writer, ttFont):
        writer.begintag(self.__class__.__name__, [("name", glyphName)])
        writer.newline()

        self.writeMetrics(writer, ttFont)
        # Use the internal write method to write using the correct output format.
        self.writeData(strikeIndex, glyphName, writer, ttFont)

        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.readMetrics(name, attrs, content, ttFont)
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attr, content = element
            if not name.endswith("imagedata"):
                continue
            # Chop off 'imagedata' from the tag to get just the option.
            option = name[: -len("imagedata")]
            assert option in self.__class__.xmlDataFunctions
            self.readData(name, attr, content, ttFont)

    # Some of the glyphs have the metrics. This allows for metrics to be
    # added if the glyph format has them. Default behavior is to do nothing.
    def writeMetrics(self, writer, ttFont):
        pass

    # The opposite of write metrics.
    def readMetrics(self, name, attrs, content, ttFont):
        pass

    def writeData(self, strikeIndex, glyphName, writer, ttFont):
        # Unknown/unset export formats fall back to the raw hex dump.
        try:
            writeFunc, readFunc = self.__class__.xmlDataFunctions[
                ttFont.bitmapGlyphDataFormat
            ]
        except KeyError:
            writeFunc = _writeRawImageData
        writeFunc(strikeIndex, glyphName, self, writer, ttFont)

    def readData(self, name, attrs, content, ttFont):
        # Chop off 'imagedata' from the tag to get just the option.
        option = name[: -len("imagedata")]
        writeFunc, readFunc = self.__class__.xmlDataFunctions[option]
        readFunc(self, name, attrs, content, ttFont)
516
+
517
+
518
+ # A closure for creating a mixin for the two types of metrics handling.
519
+ # Most of the code is very similar so its easier to deal with here.
520
+ # Everything works just by passing the class that the mixin is for.
521
def _createBitmapPlusMetricsMixin(metricsClass):
    """Build a mixin that reads/writes *metricsClass* (BigGlyphMetrics or
    SmallGlyphMetrics) for a bitmap glyph format, warning when the opposite
    metrics type appears in the XML."""
    # Both metrics names are listed here to make meaningful error messages.
    metricStrings = [BigGlyphMetrics.__name__, SmallGlyphMetrics.__name__]
    curMetricsName = metricsClass.__name__
    # Find which metrics this is for and determine the opposite name.
    metricsId = metricStrings.index(curMetricsName)
    oppositeMetricsName = metricStrings[1 - metricsId]

    class BitmapPlusMetricsMixin(object):
        def writeMetrics(self, writer, ttFont):
            self.metrics.toXML(writer, ttFont)

        def readMetrics(self, name, attrs, content, ttFont):
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == curMetricsName:
                    self.metrics = metricsClass()
                    self.metrics.fromXML(name, attrs, content, ttFont)
                elif name == oppositeMetricsName:
                    log.warning(
                        "Warning: %s being ignored in format %d.",
                        oppositeMetricsName,
                        self.getFormat(),
                    )

    return BitmapPlusMetricsMixin
549
+
550
+
551
# Since there are only two types of mixins, just create them here.
BitmapPlusBigMetricsMixin = _createBitmapPlusMetricsMixin(BigGlyphMetrics)
BitmapPlusSmallMetricsMixin = _createBitmapPlusMetricsMixin(SmallGlyphMetrics)
554
+
555
+
556
+ # Data that is bit aligned can be tricky to deal with. These classes implement
557
+ # helper functionality for dealing with the data and getting a particular row
558
+ # of bitwise data. Also helps implement fancy data export/import in XML.
559
class BitAlignedBitmapMixin(object):
    """Mixin for bitmap formats whose rows are packed with no byte padding:
    a row boundary may fall in the middle of a byte."""

    def _getBitRange(self, row, bitDepth, metrics):
        # (start, end) bit offsets of *row* within the packed image data.
        rowBits = bitDepth * metrics.width
        bitOffset = row * rowBits
        return (bitOffset, bitOffset + rowBits)

    def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
        """Extract one row of the bitmap as bytes, zero-padded at the end."""
        if metrics is None:
            metrics = self.metrics
        assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"

        # Loop through each byte. This can cover two bytes in the original data or
        # a single byte if things happen to be aligned. The very last entry might
        # not be aligned so take care to trim the binary data to size and pad with
        # zeros in the row data. Bit aligned data is somewhat tricky.
        #
        # Example of data cut. Data cut represented in x's.
        # '|' represents byte boundary.
        # data = ...0XX|XXXXXX00|000... => XXXXXXXX
        # or
        # data = ...0XX|XXXX0000|000... => XXXXXX00
        # or
        # data = ...000|XXXXXXXX|000... => XXXXXXXX
        # or
        # data = ...000|00XXXX00|000... => XXXX0000
        #
        dataList = []
        bitRange = self._getBitRange(row, bitDepth, metrics)
        stepRange = bitRange + (8,)
        for curBit in range(*stepRange):
            endBit = min(curBit + 8, bitRange[1])
            numBits = endBit - curBit
            cutPoint = curBit % 8
            firstByteLoc = curBit // 8
            secondByteLoc = endBit // 8
            if firstByteLoc < secondByteLoc:
                numBitsCut = 8 - cutPoint
            else:
                numBitsCut = endBit - curBit
            # All bit twiddling is done on reversed (LSB-first) bytes; the
            # result is flipped back at the end unless the caller asked for
            # reversed bytes.
            curByte = _reverseBytes(self.imageData[firstByteLoc])
            firstHalf = byteord(curByte) >> cutPoint
            firstHalf = ((1 << numBitsCut) - 1) & firstHalf
            newByte = firstHalf
            if firstByteLoc < secondByteLoc and secondByteLoc < len(self.imageData):
                curByte = _reverseBytes(self.imageData[secondByteLoc])
                secondHalf = byteord(curByte) << numBitsCut
                newByte = (firstHalf | secondHalf) & ((1 << numBits) - 1)
            dataList.append(bytechr(newByte))

        # The way the data is kept is opposite the algorithm used.
        data = bytesjoin(dataList)
        if not reverseBytes:
            data = _reverseBytes(data)
        return data

    def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
        """Pack *dataRows* (one bytes object per row) into bit-aligned image data."""
        if metrics is None:
            metrics = self.metrics
        if not reverseBytes:
            dataRows = list(map(_reverseBytes, dataRows))

        # Keep track of a list of ordinal values as they are easier to modify
        # than a list of strings. Map to actual strings later.
        numBytes = (self._getBitRange(len(dataRows), bitDepth, metrics)[0] + 7) // 8
        ordDataList = [0] * numBytes
        for row, data in enumerate(dataRows):
            bitRange = self._getBitRange(row, bitDepth, metrics)
            stepRange = bitRange + (8,)
            for curBit, curByte in zip(range(*stepRange), data):
                endBit = min(curBit + 8, bitRange[1])
                cutPoint = curBit % 8
                firstByteLoc = curBit // 8
                secondByteLoc = endBit // 8
                if firstByteLoc < secondByteLoc:
                    numBitsCut = 8 - cutPoint
                else:
                    numBitsCut = endBit - curBit
                curByte = byteord(curByte)
                firstByte = curByte & ((1 << numBitsCut) - 1)
                ordDataList[firstByteLoc] |= firstByte << cutPoint
                if firstByteLoc < secondByteLoc and secondByteLoc < numBytes:
                    secondByte = (curByte >> numBitsCut) & ((1 << 8 - numBitsCut) - 1)
                    ordDataList[secondByteLoc] |= secondByte

        # Save the image data with the bits going the correct way.
        self.imageData = _reverseBytes(bytesjoin(map(bytechr, ordDataList)))
645
+
646
+
647
class ByteAlignedBitmapMixin(object):
    """Mixin for bitmap formats whose rows are each padded to a byte boundary."""

    def _getByteRange(self, row, bitDepth, metrics):
        # Each row occupies ceil(bitDepth * width / 8) bytes.
        bytesPerRow = (bitDepth * metrics.width + 7) // 8
        start = row * bytesPerRow
        return (start, start + bytesPerRow)

    def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
        """Return the raw bytes for *row* of the bitmap."""
        if metrics is None:
            metrics = self.metrics
        assert 0 <= row < metrics.height, "Illegal row access in bitmap"
        start, end = self._getByteRange(row, bitDepth, metrics)
        rowBytes = self.imageData[start:end]
        return _reverseBytes(rowBytes) if reverseBytes else rowBytes

    def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
        """Store *dataRows* (one bytes object per row) as the image data."""
        if metrics is None:
            metrics = self.metrics
        if reverseBytes:
            dataRows = map(_reverseBytes, dataRows)
        self.imageData = bytesjoin(dataRows)
669
+
670
+
671
class ebdt_bitmap_format_1(
    ByteAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph
):
    """Format 1: small glyph metrics followed by byte-aligned image data."""

    def decompile(self):
        self.metrics = SmallGlyphMetrics()
        # unpack2 returns the leftover bytes after the metrics record.
        dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
        self.imageData = data

    def compile(self, ttFont):
        data = sstruct.pack(smallGlyphMetricsFormat, self.metrics)
        return data + self.imageData
682
+
683
+
684
class ebdt_bitmap_format_2(
    BitAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph
):
    """Format 2: small glyph metrics followed by bit-aligned image data."""

    def decompile(self):
        self.metrics = SmallGlyphMetrics()
        # unpack2 returns the leftover bytes after the metrics record.
        dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
        self.imageData = data

    def compile(self, ttFont):
        data = sstruct.pack(smallGlyphMetricsFormat, self.metrics)
        return data + self.imageData
695
+
696
+
697
class ebdt_bitmap_format_5(BitAlignedBitmapMixin, BitmapGlyph):
    """Format 5: bit-aligned image data only; this format carries no glyph
    metrics of its own (the table's toXML falls back to the index subtable's
    metrics for it)."""

    def decompile(self):
        self.imageData = self.data

    def compile(self, ttFont):
        return self.imageData
703
+
704
+
705
class ebdt_bitmap_format_6(
    ByteAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph
):
    """Format 6: big glyph metrics followed by byte-aligned image data."""

    def decompile(self):
        self.metrics = BigGlyphMetrics()
        # unpack2 returns the leftover bytes after the metrics record.
        dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
        self.imageData = data

    def compile(self, ttFont):
        data = sstruct.pack(bigGlyphMetricsFormat, self.metrics)
        return data + self.imageData
716
+
717
+
718
class ebdt_bitmap_format_7(
    BitAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph
):
    """Format 7: big glyph metrics followed by bit-aligned image data."""

    def decompile(self):
        self.metrics = BigGlyphMetrics()
        # unpack2 returns the leftover bytes after the metrics record.
        dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
        self.imageData = data

    def compile(self, ttFont):
        data = sstruct.pack(bigGlyphMetricsFormat, self.metrics)
        return data + self.imageData
729
+
730
+
731
class ComponentBitmapGlyph(BitmapGlyph):
    """Base for composite bitmap formats (8 and 9), which are built from a
    list of EbdtComponent references instead of inline image data."""

    def toXML(self, strikeIndex, glyphName, writer, ttFont):
        writer.begintag(self.__class__.__name__, [("name", glyphName)])
        writer.newline()

        self.writeMetrics(writer, ttFont)

        writer.begintag("components")
        writer.newline()
        for curComponent in self.componentArray:
            curComponent.toXML(writer, ttFont)
        writer.endtag("components")
        writer.newline()

        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.readMetrics(name, attrs, content, ttFont)
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attr, content = element
            if name == "components":
                self.componentArray = []
                for compElement in content:
                    if not isinstance(compElement, tuple):
                        continue
                    name, attrs, content = compElement
                    if name == "ebdtComponent":
                        curComponent = EbdtComponent()
                        curComponent.fromXML(name, attrs, content, ttFont)
                        self.componentArray.append(curComponent)
                    else:
                        log.warning("'%s' being ignored in component array.", name)
766
+
767
+
768
class ebdt_bitmap_format_8(BitmapPlusSmallMetricsMixin, ComponentBitmapGlyph):
    """Format 8: small metrics, a pad byte, then a list of glyph components."""

    def decompile(self):
        self.metrics = SmallGlyphMetrics()
        dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
        # Skip the pad byte that follows the small metrics in this format
        # (mirrored by the b"\0" appended in compile()).
        data = data[1:]

        (numComponents,) = struct.unpack(">H", data[:2])
        data = data[2:]
        self.componentArray = []
        for i in range(numComponents):
            curComponent = EbdtComponent()
            dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent)
            # Store the component by glyph name rather than numeric glyph id.
            curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode)
            self.componentArray.append(curComponent)

    def compile(self, ttFont):
        dataList = []
        dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics))
        # Pad byte, matching the one skipped in decompile().
        dataList.append(b"\0")
        dataList.append(struct.pack(">H", len(self.componentArray)))
        for curComponent in self.componentArray:
            curComponent.glyphCode = ttFont.getGlyphID(curComponent.name)
            dataList.append(sstruct.pack(ebdtComponentFormat, curComponent))
        return bytesjoin(dataList)
792
+
793
+
794
class ebdt_bitmap_format_9(BitmapPlusBigMetricsMixin, ComponentBitmapGlyph):
    """Format 9: big metrics followed by a list of glyph components
    (no pad byte, unlike format 8)."""

    def decompile(self):
        self.metrics = BigGlyphMetrics()
        dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
        (numComponents,) = struct.unpack(">H", data[:2])
        data = data[2:]
        self.componentArray = []
        for i in range(numComponents):
            curComponent = EbdtComponent()
            dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent)
            # Store the component by glyph name rather than numeric glyph id.
            curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode)
            self.componentArray.append(curComponent)

    def compile(self, ttFont):
        dataList = []
        dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
        dataList.append(struct.pack(">H", len(self.componentArray)))
        for curComponent in self.componentArray:
            curComponent.glyphCode = ttFont.getGlyphID(curComponent.name)
            dataList.append(sstruct.pack(ebdtComponentFormat, curComponent))
        return bytesjoin(dataList)
815
+
816
+
817
# Dictionary of bitmap formats to the class representing that format;
# currently only the ones listed in this map are supported.
ebdt_bitmap_classes = {
    1: ebdt_bitmap_format_1,
    2: ebdt_bitmap_format_2,
    5: ebdt_bitmap_format_5,
    6: ebdt_bitmap_format_6,
    7: ebdt_bitmap_format_7,
    8: ebdt_bitmap_format_8,
    9: ebdt_bitmap_format_9,
}
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/E_B_L_C_.py ADDED
@@ -0,0 +1,710 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fontTools.misc import sstruct
2
+ from . import DefaultTable
3
+ from fontTools.misc.textTools import bytesjoin, safeEval
4
+ from .BitmapGlyphMetrics import (
5
+ BigGlyphMetrics,
6
+ bigGlyphMetricsFormat,
7
+ SmallGlyphMetrics,
8
+ smallGlyphMetricsFormat,
9
+ )
10
+ import struct
11
+ import itertools
12
+ from collections import deque
13
+ import logging
14
+
15
+
16
+ log = logging.getLogger(__name__)
17
+
18
# sstruct layout for the EBLC table header.
eblcHeaderFormat = """
> # big endian
version: 16.16F
numSizes: I
"""
# The table format string is split to handle sbitLineMetrics simply.
bitmapSizeTableFormatPart1 = """
> # big endian
indexSubTableArrayOffset: I
indexTablesSize: I
numberOfIndexSubTables: I
colorRef: I
"""
# The compound type for hori and vert.
sbitLineMetricsFormat = """
> # big endian
ascender: b
descender: b
widthMax: B
caretSlopeNumerator: b
caretSlopeDenominator: b
caretOffset: b
minOriginSB: b
minAdvanceSB: b
maxBeforeBL: b
minAfterBL: b
pad1: b
pad2: b
"""
# hori and vert go between the two parts.
bitmapSizeTableFormatPart2 = """
> # big endian
startGlyphIndex: H
endGlyphIndex: H
ppemX: B
ppemY: B
bitDepth: B
flags: b
"""

# Fixed-size binary record layouts plus their sizes in bytes.
indexSubTableArrayFormat = ">HHL"
indexSubTableArraySize = struct.calcsize(indexSubTableArrayFormat)

indexSubHeaderFormat = ">HHL"
indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat)

codeOffsetPairFormat = ">HH"
codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat)
+ codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat)
66
+
67
+
68
+ class table_E_B_L_C_(DefaultTable.DefaultTable):
69
+ dependencies = ["EBDT"]
70
+
71
    # This method can be overridden in subclasses to support new formats
    # without changing the other implementation. Also can be used as a
    # convenience method for converting a font file to an alternative format.
    def getIndexFormatClass(self, indexFormat):
        """Map a numeric index-subtable format to its implementing class."""
        return eblc_sub_table_classes[indexFormat]
76
+
77
    def decompile(self, data, ttFont):
        """Parse the binary EBLC table: the header, one bitmapSizeTable per
        strike, then each strike's indexSubTableArray/indexSubTable pairs."""
        # Save the original data because offsets are from the start of the table.
        origData = data
        i = 0

        dummy = sstruct.unpack(eblcHeaderFormat, data[:8], self)
        i += 8

        self.strikes = []
        for curStrikeIndex in range(self.numSizes):
            curStrike = Strike()
            self.strikes.append(curStrike)
            curTable = curStrike.bitmapSizeTable
            dummy = sstruct.unpack2(
                bitmapSizeTableFormatPart1, data[i : i + 16], curTable
            )
            i += 16
            # The hori/vert sbitLineMetrics records sit between the two
            # halves of the bitmapSizeTable format.
            for metric in ("hori", "vert"):
                metricObj = SbitLineMetrics()
                vars(curTable)[metric] = metricObj
                dummy = sstruct.unpack2(
                    sbitLineMetricsFormat, data[i : i + 12], metricObj
                )
                i += 12
            dummy = sstruct.unpack(
                bitmapSizeTableFormatPart2, data[i : i + 8], curTable
            )
            i += 8

        for curStrike in self.strikes:
            curTable = curStrike.bitmapSizeTable
            for subtableIndex in range(curTable.numberOfIndexSubTables):
                # indexSubTableArrayOffset is absolute from the table start.
                i = (
                    curTable.indexSubTableArrayOffset
                    + subtableIndex * indexSubTableArraySize
                )

                tup = struct.unpack(
                    indexSubTableArrayFormat, data[i : i + indexSubTableArraySize]
                )
                (firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup
                # The subtable itself is located relative to the array offset.
                i = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable

                tup = struct.unpack(
                    indexSubHeaderFormat, data[i : i + indexSubHeaderSize]
                )
                (indexFormat, imageFormat, imageDataOffset) = tup

                indexFormatClass = self.getIndexFormatClass(indexFormat)
                indexSubTable = indexFormatClass(data[i + indexSubHeaderSize :], ttFont)
                indexSubTable.firstGlyphIndex = firstGlyphIndex
                indexSubTable.lastGlyphIndex = lastGlyphIndex
                indexSubTable.additionalOffsetToIndexSubtable = (
                    additionalOffsetToIndexSubtable
                )
                indexSubTable.indexFormat = indexFormat
                indexSubTable.imageFormat = imageFormat
                indexSubTable.imageDataOffset = imageDataOffset
                indexSubTable.decompile()  # https://github.com/fonttools/fonttools/issues/317
                curStrike.indexSubTables.append(indexSubTable)
137
+
138
+ def compile(self, ttFont):
139
+ dataList = []
140
+ self.numSizes = len(self.strikes)
141
+ dataList.append(sstruct.pack(eblcHeaderFormat, self))
142
+
143
+ # Data size of the header + bitmapSizeTable needs to be calculated
144
+ # in order to form offsets. This value will hold the size of the data
145
+ # in dataList after all the data is consolidated in dataList.
146
+ dataSize = len(dataList[0])
147
+
148
+ # The table will be structured in the following order:
149
+ # (0) header
150
+ # (1) Each bitmapSizeTable [1 ... self.numSizes]
151
+ # (2) Alternate between indexSubTableArray and indexSubTable
152
+ # for each bitmapSizeTable present.
153
+ #
154
+ # The issue is maintaining the proper offsets when table information
155
+ # gets moved around. All offsets and size information must be recalculated
156
+ # when building the table to allow editing within ttLib and also allow easy
157
+ # import/export to and from XML. All of this offset information is lost
158
+ # when exporting to XML so everything must be calculated fresh so importing
159
+ # from XML will work cleanly. Only byte offset and size information is
160
+ # calculated fresh. Count information like numberOfIndexSubTables is
161
+ # checked through assertions. If the information in this table was not
162
+ # touched or was changed properly then these types of values should match.
163
+ #
164
+ # The table will be rebuilt the following way:
165
+ # (0) Precompute the size of all the bitmapSizeTables. This is needed to
166
+ # compute the offsets properly.
167
+ # (1) For each bitmapSizeTable compute the indexSubTable and
168
+ # indexSubTableArray pair. The indexSubTable must be computed first
169
+ # so that the offset information in indexSubTableArray can be
170
+ # calculated. Update the data size after each pairing.
171
+ # (2) Build each bitmapSizeTable.
172
+ # (3) Consolidate all the data into the main dataList in the correct order.
173
+
174
+ for _ in self.strikes:
175
+ dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1)
176
+ dataSize += len(("hori", "vert")) * sstruct.calcsize(sbitLineMetricsFormat)
177
+ dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2)
178
+
179
+ indexSubTablePairDataList = []
180
+ for curStrike in self.strikes:
181
+ curTable = curStrike.bitmapSizeTable
182
+ curTable.numberOfIndexSubTables = len(curStrike.indexSubTables)
183
+ curTable.indexSubTableArrayOffset = dataSize
184
+
185
+ # Precompute the size of the indexSubTableArray. This information
186
+ # is important for correctly calculating the new value for
187
+ # additionalOffsetToIndexSubtable.
188
+ sizeOfSubTableArray = (
189
+ curTable.numberOfIndexSubTables * indexSubTableArraySize
190
+ )
191
+ lowerBound = dataSize
192
+ dataSize += sizeOfSubTableArray
193
+ upperBound = dataSize
194
+
195
+ indexSubTableDataList = []
196
+ for indexSubTable in curStrike.indexSubTables:
197
+ indexSubTable.additionalOffsetToIndexSubtable = (
198
+ dataSize - curTable.indexSubTableArrayOffset
199
+ )
200
+ glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names))
201
+ indexSubTable.firstGlyphIndex = min(glyphIds)
202
+ indexSubTable.lastGlyphIndex = max(glyphIds)
203
+ data = indexSubTable.compile(ttFont)
204
+ indexSubTableDataList.append(data)
205
+ dataSize += len(data)
206
+ curTable.startGlyphIndex = min(
207
+ ist.firstGlyphIndex for ist in curStrike.indexSubTables
208
+ )
209
+ curTable.endGlyphIndex = max(
210
+ ist.lastGlyphIndex for ist in curStrike.indexSubTables
211
+ )
212
+
213
+ for i in curStrike.indexSubTables:
214
+ data = struct.pack(
215
+ indexSubHeaderFormat,
216
+ i.firstGlyphIndex,
217
+ i.lastGlyphIndex,
218
+ i.additionalOffsetToIndexSubtable,
219
+ )
220
+ indexSubTablePairDataList.append(data)
221
+ indexSubTablePairDataList.extend(indexSubTableDataList)
222
+ curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset
223
+
224
+ for curStrike in self.strikes:
225
+ curTable = curStrike.bitmapSizeTable
226
+ data = sstruct.pack(bitmapSizeTableFormatPart1, curTable)
227
+ dataList.append(data)
228
+ for metric in ("hori", "vert"):
229
+ metricObj = vars(curTable)[metric]
230
+ data = sstruct.pack(sbitLineMetricsFormat, metricObj)
231
+ dataList.append(data)
232
+ data = sstruct.pack(bitmapSizeTableFormatPart2, curTable)
233
+ dataList.append(data)
234
+ dataList.extend(indexSubTablePairDataList)
235
+
236
+ return bytesjoin(dataList)
237
+
238
+ def toXML(self, writer, ttFont):
239
+ writer.simpletag("header", [("version", self.version)])
240
+ writer.newline()
241
+ for curIndex, curStrike in enumerate(self.strikes):
242
+ curStrike.toXML(curIndex, writer, ttFont)
243
+
244
+ def fromXML(self, name, attrs, content, ttFont):
245
+ if name == "header":
246
+ self.version = safeEval(attrs["version"])
247
+ elif name == "strike":
248
+ if not hasattr(self, "strikes"):
249
+ self.strikes = []
250
+ strikeIndex = safeEval(attrs["index"])
251
+ curStrike = Strike()
252
+ curStrike.fromXML(name, attrs, content, ttFont, self)
253
+
254
+ # Grow the strike array to the appropriate size. The XML format
255
+ # allows for the strike index value to be out of order.
256
+ if strikeIndex >= len(self.strikes):
257
+ self.strikes += [None] * (strikeIndex + 1 - len(self.strikes))
258
+ assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices."
259
+ self.strikes[strikeIndex] = curStrike
260
+
261
+
262
class Strike(object):
    """One EBLC strike: a bitmapSizeTable plus its list of index subtables."""

    def __init__(self):
        self.bitmapSizeTable = BitmapSizeTable()
        self.indexSubTables = []

    def toXML(self, strikeIndex, writer, ttFont):
        writer.begintag("strike", [("index", strikeIndex)])
        writer.newline()
        self.bitmapSizeTable.toXML(writer, ttFont)
        writer.comment(
            "GlyphIds are written but not read. The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler."
        )
        writer.newline()
        for subTable in self.indexSubTables:
            subTable.toXML(writer, ttFont)
        writer.endtag("strike")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont, locator):
        # "locator" supplies getIndexFormatClass() to resolve the subtable
        # class for each eblc_index_sub_table_N element.
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == "bitmapSizeTable":
                self.bitmapSizeTable.fromXML(name, attrs, content, ttFont)
            elif name.startswith(_indexSubTableSubclassPrefix):
                indexFormat = safeEval(name[len(_indexSubTableSubclassPrefix):])
                subTableClass = locator.getIndexFormatClass(indexFormat)
                subTable = subTableClass(None, None)
                subTable.indexFormat = indexFormat
                subTable.fromXML(name, attrs, content, ttFont)
                self.indexSubTables.append(subTable)
294
+
295
+
296
class BitmapSizeTable(object):
    """The per-strike size record of the EBLC table."""

    def _getXMLMetricNames(self):
        # Merge the field names of both struct halves; the first three are
        # byte offsets / counts that get recomputed at compile time, so they
        # are excluded from the XML round trip.
        dataNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1]
        dataNames = {**dataNames, **sstruct.getformat(bitmapSizeTableFormatPart2)[1]}
        return list(dataNames.keys())[3:]

    def toXML(self, writer, ttFont):
        writer.begintag("bitmapSizeTable")
        writer.newline()
        for direction in ("hori", "vert"):
            getattr(self, direction).toXML(direction, writer, ttFont)
        for metricName in self._getXMLMetricNames():
            writer.simpletag(metricName, value=getattr(self, metricName))
            writer.newline()
        writer.endtag("bitmapSizeTable")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Only names known to the size-table structs are honored; anything
        # else is reported and skipped.
        simpleNames = set(self._getXMLMetricNames())
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == "sbitLineMetrics":
                direction = attrs["direction"]
                assert direction in (
                    "hori",
                    "vert",
                ), "SbitLineMetrics direction specified invalid."
                metrics = SbitLineMetrics()
                metrics.fromXML(name, attrs, content, ttFont)
                setattr(self, direction, metrics)
            elif name in simpleNames:
                setattr(self, name, safeEval(attrs["value"]))
            else:
                log.warning("unknown name '%s' being ignored in BitmapSizeTable.", name)
337
+
338
+
339
class SbitLineMetrics(object):
    """Horizontal or vertical line metrics belonging to a bitmapSizeTable."""

    def toXML(self, name, writer, ttFont):
        writer.begintag("sbitLineMetrics", [("direction", name)])
        writer.newline()
        for metricName in sstruct.getformat(sbitLineMetricsFormat)[1]:
            writer.simpletag(metricName, value=getattr(self, metricName))
            writer.newline()
        writer.endtag("sbitLineMetrics")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Accept only field names defined by the sbitLineMetrics struct.
        validNames = set(sstruct.getformat(sbitLineMetricsFormat)[1])
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name in validNames:
                setattr(self, name, safeEval(attrs["value"]))
357
+
358
+
359
+ # Important information about the naming scheme. Used for identifying subtables.
360
+ _indexSubTableSubclassPrefix = "eblc_index_sub_table_"
361
+
362
+
363
class EblcIndexSubTable(object):
    """Base class for the EBLC index subtable formats.

    Decompilation is lazy: the raw bytes stay on ``self.data`` (and the font
    on ``self.ttFont``) until some attribute is first requested, at which
    point ``decompile()`` runs and both are deleted.
    """

    def __init__(self, data, ttFont):
        self.data = data
        self.ttFont = ttFont
        # TODO Currently non-lazy decompiling doesn't work for this class...
        # if not ttFont.lazy:
        # 	self.decompile()
        # 	del self.data, self.ttFont

    def __getattr__(self, attr):
        # Invoked only for missing attributes: trigger the lazy decompile.
        # Dunder lookups and "data" itself must not recurse into decompile
        # (after decompile, "data" is legitimately gone).
        if attr[:2] == "__":
            raise AttributeError(attr)
        if attr == "data":
            raise AttributeError(attr)
        self.decompile()
        return getattr(self, attr)

    def ensureDecompiled(self, recurse=False):
        if hasattr(self, "data"):
            self.decompile()

    def compile(self, ttFont):
        # Packs only the indexSubHeader; subclasses call this and append
        # the remainder of their specific format.
        return struct.pack(
            indexSubHeaderFormat,
            self.indexFormat,
            self.imageFormat,
            self.imageDataOffset,
        )

    def toXML(self, writer, ttFont):
        """Emit the common per-subtable XML; subclasses contribute metrics
        via writeMetrics()."""
        writer.begintag(
            self.__class__.__name__,
            [
                ("imageFormat", self.imageFormat),
                ("firstGlyphIndex", self.firstGlyphIndex),
                ("lastGlyphIndex", self.lastGlyphIndex),
            ],
        )
        writer.newline()
        self.writeMetrics(writer, ttFont)
        # Only the glyph names are needed to rebuild the table; the ids are
        # written too as a debugging aid and are ignored on import.
        for glyphName in self.names:
            writer.simpletag(
                "glyphLoc", name=glyphName, id=ttFont.getGlyphID(glyphName)
            )
            writer.newline()
        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Glyph index attributes get recalculated at compile time but are
        # read anyway so an immediate re-export stays faithful.
        self.imageFormat = safeEval(attrs["imageFormat"])
        self.firstGlyphIndex = safeEval(attrs["firstGlyphIndex"])
        self.lastGlyphIndex = safeEval(attrs["lastGlyphIndex"])

        self.readMetrics(name, attrs, content, ttFont)

        self.names = [
            elemAttrs["name"]
            for elemName, elemAttrs, _ in (
                element for element in content if isinstance(element, tuple)
            )
            if elemName == "glyphLoc"
        ]

    def writeMetrics(self, writer, ttFont):
        # Hook for subclasses with fixed-size metrics; default writes nothing.
        pass

    def readMetrics(self, name, attrs, content, ttFont):
        # Inverse hook of writeMetrics; default reads nothing.
        pass

    def padBitmapData(self, data):
        # Fixed-size formats override this to pad glyph data out to the
        # fixed image size; the default passes data through untouched.
        return data

    def removeSkipGlyphs(self):
        # Skip glyphs (only produced by formats 1 and 3) are marked by a
        # zero-length data range; drop them and their names together.
        keep = [
            (glyphName, location)
            for glyphName, location in zip(self.names, self.locations)
            if location[0] < location[1]
        ]
        self.names, self.locations = (list(part) for part in zip(*keep))
469
+
470
+
471
+ # A closure for creating a custom mixin. This is done because formats 1 and 3
472
+ # are very similar. The only difference between them is the size per offset
473
+ # value. Code put in here should handle both cases generally.
474
+ def _createOffsetArrayIndexSubTableMixin(formatStringForDataType):
475
+ # Prep the data size for the offset array data format.
476
+ dataFormat = ">" + formatStringForDataType
477
+ offsetDataSize = struct.calcsize(dataFormat)
478
+
479
+ class OffsetArrayIndexSubTableMixin(object):
480
+ def decompile(self):
481
+ numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1
482
+ indexingOffsets = [
483
+ glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs + 2)
484
+ ]
485
+ indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
486
+ offsetArray = [
487
+ struct.unpack(dataFormat, self.data[slice(*loc)])[0]
488
+ for loc in indexingLocations
489
+ ]
490
+
491
+ glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
492
+ modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray]
493
+ self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:]))
494
+
495
+ self.names = list(map(self.ttFont.getGlyphName, glyphIds))
496
+ self.removeSkipGlyphs()
497
+ del self.data, self.ttFont
498
+
499
+ def compile(self, ttFont):
500
+ # First make sure that all the data lines up properly. Formats 1 and 3
501
+ # must have all its data lined up consecutively. If not this will fail.
502
+ for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
503
+ assert (
504
+ curLoc[1] == nxtLoc[0]
505
+ ), "Data must be consecutive in indexSubTable offset formats"
506
+
507
+ glyphIds = list(map(ttFont.getGlyphID, self.names))
508
+ # Make sure that all ids are sorted strictly increasing.
509
+ assert all(glyphIds[i] < glyphIds[i + 1] for i in range(len(glyphIds) - 1))
510
+
511
+ # Run a simple algorithm to add skip glyphs to the data locations at
512
+ # the places where an id is not present.
513
+ idQueue = deque(glyphIds)
514
+ locQueue = deque(self.locations)
515
+ allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
516
+ allLocations = []
517
+ for curId in allGlyphIds:
518
+ if curId != idQueue[0]:
519
+ allLocations.append((locQueue[0][0], locQueue[0][0]))
520
+ else:
521
+ idQueue.popleft()
522
+ allLocations.append(locQueue.popleft())
523
+
524
+ # Now that all the locations are collected, pack them appropriately into
525
+ # offsets. This is the form where offset[i] is the location and
526
+ # offset[i+1]-offset[i] is the size of the data location.
527
+ offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]]
528
+ # Image data offset must be less than or equal to the minimum of locations.
529
+ # This offset may change the value for round tripping but is safer and
530
+ # allows imageDataOffset to not be required to be in the XML version.
531
+ self.imageDataOffset = min(offsets)
532
+ offsetArray = [offset - self.imageDataOffset for offset in offsets]
533
+
534
+ dataList = [EblcIndexSubTable.compile(self, ttFont)]
535
+ dataList += [
536
+ struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray
537
+ ]
538
+ # Take care of any padding issues. Only occurs in format 3.
539
+ if offsetDataSize * len(offsetArray) % 4 != 0:
540
+ dataList.append(struct.pack(dataFormat, 0))
541
+ return bytesjoin(dataList)
542
+
543
+ return OffsetArrayIndexSubTableMixin
544
+
545
+
546
+ # A Mixin for functionality shared between the different kinds
547
+ # of fixed sized data handling. Both kinds have big metrics so
548
+ # that kind of special processing is also handled in this mixin.
549
class FixedSizeIndexSubTableMixin(object):
    """Shared logic for the fixed-glyph-size index formats (2 and 5).

    Both carry an imageSize plus a single BigGlyphMetrics record and pad each
    glyph's bitmap data out to the fixed size.
    """

    def writeMetrics(self, writer, ttFont):
        writer.simpletag("imageSize", value=self.imageSize)
        writer.newline()
        self.metrics.toXML(writer, ttFont)

    def readMetrics(self, name, attrs, content, ttFont):
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == "imageSize":
                self.imageSize = safeEval(attrs["value"])
            elif name == BigGlyphMetrics.__name__:
                self.metrics = BigGlyphMetrics()
                self.metrics.fromXML(name, attrs, content, ttFont)
            elif name == SmallGlyphMetrics.__name__:
                # Fixed-size formats only carry big metrics.
                log.warning(
                    "SmallGlyphMetrics being ignored in format %d.", self.indexFormat
                )

    def padBitmapData(self, data):
        assert len(data) <= self.imageSize, (
            "Data in indexSubTable format %d must be less than the fixed size."
            % self.indexFormat
        )
        # Zero-fill up to the fixed image size.
        return data + b"\0" * (self.imageSize - len(data))
579
+
580
+
581
class eblc_index_sub_table_1(
    _createOffsetArrayIndexSubTableMixin("L"), EblcIndexSubTable
):
    """Format 1: array of 32-bit offsets into the glyph bitmap data."""
585
+
586
+
587
class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
    """Format 2: fixed image size covering one run of consecutive glyph ids."""

    def decompile(self):
        (self.imageSize,) = struct.unpack(">L", self.data[:4])
        self.metrics = BigGlyphMetrics()
        sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics)
        glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
        # Each glyph occupies exactly imageSize bytes, back to back.
        offsets = [
            self.imageDataOffset + self.imageSize * i
            for i in range(len(glyphIds) + 1)
        ]
        self.locations = list(zip(offsets, offsets[1:]))
        self.names = [self.ttFont.getGlyphName(gid) for gid in glyphIds]
        del self.data, self.ttFont

    def compile(self, ttFont):
        glyphIds = [ttFont.getGlyphID(glyphName) for glyphName in self.names]
        # Format 2 stores no per-glyph ids, so the run must be contiguous.
        assert glyphIds == list(
            range(self.firstGlyphIndex, self.lastGlyphIndex + 1)
        ), "Format 2 ids must be consecutive."
        self.imageDataOffset = min(next(iter(zip(*self.locations))))

        dataList = [EblcIndexSubTable.compile(self, ttFont)]
        dataList.append(struct.pack(">L", self.imageSize))
        dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
        return bytesjoin(dataList)
612
+
613
+
614
class eblc_index_sub_table_3(
    _createOffsetArrayIndexSubTableMixin("H"), EblcIndexSubTable
):
    """Format 3: array of 16-bit offsets into the glyph bitmap data."""
618
+
619
+
620
class eblc_index_sub_table_4(EblcIndexSubTable):
    """Format 4: sparse array of (glyphId, offset) pairs."""

    def decompile(self):
        (numGlyphs,) = struct.unpack(">L", self.data[:4])
        data = self.data[4:]
        # numGlyphs + 1 pairs are stored; the final pair only delimits the
        # data of the last real glyph.
        pairs = [
            struct.unpack_from(codeOffsetPairFormat, data, i * codeOffsetPairSize)
            for i in range(numGlyphs + 1)
        ]
        glyphIds, offsets = (list(column) for column in zip(*pairs))
        # Drop the sentinel glyph id belonging to the delimiting pair.
        glyphIds.pop()

        offsets = [self.imageDataOffset + off for off in offsets]
        self.locations = list(zip(offsets, offsets[1:]))
        self.names = [self.ttFont.getGlyphName(gid) for gid in glyphIds]
        del self.data, self.ttFont

    def compile(self, ttFont):
        # Format 4 requires the glyph data to be laid out consecutively.
        for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
            assert (
                curLoc[1] == nxtLoc[0]
            ), "Data must be consecutive in indexSubTable format 4"

        offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]]
        # Recompute imageDataOffset from the data itself; this may change the
        # round-trip value but lets XML omit imageDataOffset safely.
        self.imageDataOffset = min(offsets)
        offsets = [off - self.imageDataOffset for off in offsets]
        glyphIds = [ttFont.getGlyphID(glyphName) for glyphName in self.names]
        # The extra delimiting pair carries a padding id of zero.
        idsPlusPad = glyphIds + [0]

        dataList = [EblcIndexSubTable.compile(self, ttFont)]
        dataList.append(struct.pack(">L", len(glyphIds)))
        dataList.extend(
            struct.pack(codeOffsetPairFormat, gid, off)
            for gid, off in zip(idsPlusPad, offsets)
        )
        return bytesjoin(dataList)
667
+
668
+
669
class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
    """Format 5: fixed image size with an explicit glyph id array."""

    def decompile(self):
        self.origDataLen = 0
        (self.imageSize,) = struct.unpack(">L", self.data[:4])
        data = self.data[4:]
        self.metrics, data = sstruct.unpack2(
            bigGlyphMetricsFormat, data, BigGlyphMetrics()
        )
        (numGlyphs,) = struct.unpack(">L", data[:4])
        data = data[4:]
        glyphIds = [
            struct.unpack_from(">H", data, 2 * i)[0] for i in range(numGlyphs)
        ]

        # Each glyph occupies exactly imageSize bytes, back to back.
        offsets = [
            self.imageDataOffset + self.imageSize * i
            for i in range(len(glyphIds) + 1)
        ]
        self.locations = list(zip(offsets, offsets[1:]))
        self.names = [self.ttFont.getGlyphName(gid) for gid in glyphIds]
        del self.data, self.ttFont

    def compile(self, ttFont):
        self.imageDataOffset = min(next(iter(zip(*self.locations))))
        dataList = [EblcIndexSubTable.compile(self, ttFont)]
        dataList.append(struct.pack(">L", self.imageSize))
        dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
        glyphIds = [ttFont.getGlyphID(glyphName) for glyphName in self.names]
        dataList.append(struct.pack(">L", len(glyphIds)))
        dataList.extend(struct.pack(">H", gid) for gid in glyphIds)
        # Pad the 16-bit id array out to a 32-bit boundary.
        if len(glyphIds) % 2 == 1:
            dataList.append(struct.pack(">H", 0))
        return bytesjoin(dataList)
701
+
702
+
703
# Dictionary of indexFormat to the class representing that format.
# Each EBLC indexFormat value (1-5) maps to the subtable class that
# decompiles/compiles that format.
eblc_sub_table_classes = {
    1: eblc_index_sub_table_1,
    2: eblc_index_sub_table_2,
    3: eblc_index_sub_table_3,
    4: eblc_index_sub_table_4,
    5: eblc_index_sub_table_5,
}
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/F_F_T_M_.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fontTools.misc import sstruct
2
+ from fontTools.misc.textTools import safeEval
3
+ from fontTools.misc.timeTools import timestampFromString, timestampToString
4
+ from . import DefaultTable
5
+
6
+ FFTMFormat = """
7
+ > # big endian
8
+ version: I
9
+ FFTimeStamp: Q
10
+ sourceCreated: Q
11
+ sourceModified: Q
12
+ """
13
+
14
+
15
class table_F_F_T_M_(DefaultTable.DefaultTable):
    """FontForge's 'FFTM' table: its own time stamp plus the source file's
    creation and modification dates."""

    # Fields that round-trip through a human-readable timestamp string.
    _timestampFields = ("FFTimeStamp", "sourceCreated", "sourceModified")

    def decompile(self, data, ttFont):
        dummy, rest = sstruct.unpack2(FFTMFormat, data, self)

    def compile(self, ttFont):
        return sstruct.pack(FFTMFormat, self)

    def toXML(self, writer, ttFont):
        writer.comment(
            "FontForge's timestamp, font source creation and modification dates"
        )
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(FFTMFormat)
        for name in names:
            value = getattr(self, name)
            if name in self._timestampFields:
                value = timestampToString(value)
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        value = attrs["value"]
        if name in self._timestampFields:
            value = timestampFromString(value)
        else:
            value = safeEval(value)
        setattr(self, name, value)
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G_D_E_F_.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from .otBase import BaseTTXConverter
2
+
3
+
4
class table_G_D_E_F_(BaseTTXConverter):
    """Glyph Definition ('GDEF') table; all conversion logic is inherited
    from BaseTTXConverter."""
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G_P_K_G_.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fontTools.misc import sstruct
2
+ from fontTools.misc.textTools import bytesjoin, safeEval, readHex
3
+ from . import DefaultTable
4
+ import sys
5
+ import array
6
+
7
+ GPKGFormat = """
8
+ > # big endian
9
+ version: H
10
+ flags: H
11
+ numGMAPs: H
12
+ numGlyplets: H
13
+ """
14
+ # psFontName is a byte string which follows the record above. This is zero padded
15
+ # to the beginning of the records array. The recordsOffset is 32-bit aligned.
16
+
17
+
18
class table_G_P_K_G_(DefaultTable.DefaultTable):
    """Glyphlet package ('GPKG') table: a list of GMAP blobs followed by a
    list of glyphlet blobs, each addressed through an offset array."""

    def decompile(self, data, ttFont):
        dummy, newData = sstruct.unpack2(GPKGFormat, data, self)

        # numGMAPs + 1 offsets delimit the GMAP byte ranges; offsets are
        # absolute from the start of the whole table.
        GMAPoffsets = array.array("I")
        endPos = (self.numGMAPs + 1) * 4
        GMAPoffsets.frombytes(newData[:endPos])
        if sys.byteorder != "big":
            GMAPoffsets.byteswap()
        self.GMAPs = [
            data[GMAPoffsets[i]:GMAPoffsets[i + 1]] for i in range(self.numGMAPs)
        ]

        pos = endPos
        endPos = pos + (self.numGlyplets + 1) * 4
        glyphletOffsets = array.array("I")
        glyphletOffsets.frombytes(newData[pos:endPos])
        if sys.byteorder != "big":
            glyphletOffsets.byteswap()
        self.glyphlets = [
            data[glyphletOffsets[i]:glyphletOffsets[i + 1]]
            for i in range(self.numGlyplets)
        ]

    def compile(self, ttFont):
        self.numGMAPs = len(self.GMAPs)
        self.numGlyplets = len(self.glyphlets)

        dataList = [sstruct.pack(GPKGFormat, self)]

        # Layout: header, GMAP offset array, glyphlet offset array, then the
        # blobs themselves; offsets are absolute from the table start.
        pos = len(dataList[0]) + (self.numGMAPs + 1) * 4 + (self.numGlyplets + 1) * 4
        GMAPoffsets = [pos]
        for gmapData in self.GMAPs:
            pos += len(gmapData)
            GMAPoffsets.append(pos)
        gmapArray = array.array("I", GMAPoffsets)
        if sys.byteorder != "big":
            gmapArray.byteswap()
        dataList.append(gmapArray.tobytes())

        glyphletOffsets = [pos]
        for glyphletData in self.glyphlets:
            pos += len(glyphletData)
            glyphletOffsets.append(pos)
        glyphletArray = array.array("I", glyphletOffsets)
        if sys.byteorder != "big":
            glyphletArray.byteswap()
        dataList.append(glyphletArray.tobytes())

        dataList.extend(self.GMAPs)
        dataList.extend(self.glyphlets)
        return bytesjoin(dataList)

    def toXML(self, writer, ttFont):
        writer.comment("Most of this table will be recalculated by the compiler")
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(GPKGFormat)
        for name in names:
            writer.simpletag(name, value=getattr(self, name))
            writer.newline()

        # Both blob lists are dumped as hex, one <hexdata> per blob.
        for tag, blobs in (("GMAPs", self.GMAPs), ("glyphlets", self.glyphlets)):
            writer.begintag(tag)
            writer.newline()
            for blob in blobs:
                writer.begintag("hexdata")
                writer.newline()
                writer.dumphex(blob)
                writer.endtag("hexdata")
                writer.newline()
            writer.endtag(tag)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name in ("GMAPs", "glyphlets"):
            blobs = getattr(self, name, None)
            if blobs is None:
                blobs = []
                setattr(self, name, blobs)
            for element in content:
                if isinstance(element, str):
                    continue
                itemName, itemAttrs, itemContent = element
                if itemName == "hexdata":
                    blobs.append(readHex(itemContent))
        else:
            setattr(self, name, safeEval(attrs["value"]))
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G_P_O_S_.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from .otBase import BaseTTXConverter
2
+
3
+
4
class table_G_P_O_S_(BaseTTXConverter):
    """Glyph Positioning ('GPOS') table; all conversion logic is inherited
    from BaseTTXConverter."""
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G__l_a_t.py ADDED
@@ -0,0 +1,234 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fontTools.misc import sstruct
2
+ from fontTools.misc.fixedTools import floatToFixedToStr
3
+ from fontTools.misc.textTools import safeEval
4
+
5
+ # from itertools import *
6
+ from functools import partial
7
+ from . import DefaultTable
8
+ from . import grUtils
9
+ import struct
10
+
11
+
12
+ Glat_format_0 = """
13
+ > # big endian
14
+ version: 16.16F
15
+ """
16
+
17
+ Glat_format_3 = """
18
+ >
19
+ version: 16.16F
20
+ compression:L # compression scheme or reserved
21
+ """
22
+
23
+ Glat_format_1_entry = """
24
+ >
25
+ attNum: B # Attribute number of first attribute
26
+ num: B # Number of attributes in this run
27
+ """
28
+ Glat_format_23_entry = """
29
+ >
30
+ attNum: H # Attribute number of first attribute
31
+ num: H # Number of attributes in this run
32
+ """
33
+
34
+ Glat_format_3_octabox_metrics = """
35
+ >
36
+ subboxBitmap: H # Which subboxes exist on 4x4 grid
37
+ diagNegMin: B # Defines minimum negatively-sloped diagonal (si)
38
+ diagNegMax: B # Defines maximum negatively-sloped diagonal (sa)
39
+ diagPosMin: B # Defines minimum positively-sloped diagonal (di)
40
+ diagPosMax: B # Defines maximum positively-sloped diagonal (da)
41
+ """
42
+
43
+ Glat_format_3_subbox_entry = """
44
+ >
45
+ left: B # xi
46
+ right: B # xa
47
+ bottom: B # yi
48
+ top: B # ya
49
+ diagNegMin: B # Defines minimum negatively-sloped diagonal (si)
50
+ diagNegMax: B # Defines maximum negatively-sloped diagonal (sa)
51
+ diagPosMin: B # Defines minimum positively-sloped diagonal (di)
52
+ diagPosMax: B # Defines maximum positively-sloped diagonal (da)
53
+ """
54
+
55
+
56
+ class _Object:
57
+ pass
58
+
59
+
60
+ class _Dict(dict):
61
+ pass
62
+
63
+
64
+ class table_G__l_a_t(DefaultTable.DefaultTable):
65
+ """
66
+ Support Graphite Glat tables
67
+ """
68
+
69
    def __init__(self, tag=None):
        """Initialize an empty Glat table."""
        DefaultTable.DefaultTable.__init__(self, tag)
        # Compression scheme for version >= 3.0 tables (passed to
        # grUtils.compress at compile time; overwritten by decompile()).
        self.scheme = 0
72
+
73
+ def decompile(self, data, ttFont):
74
+ sstruct.unpack2(Glat_format_0, data, self)
75
+ self.version = float(floatToFixedToStr(self.version, precisionBits=16))
76
+ if self.version <= 1.9:
77
+ decoder = partial(self.decompileAttributes12, fmt=Glat_format_1_entry)
78
+ elif self.version <= 2.9:
79
+ decoder = partial(self.decompileAttributes12, fmt=Glat_format_23_entry)
80
+ elif self.version >= 3.0:
81
+ (data, self.scheme) = grUtils.decompress(data)
82
+ sstruct.unpack2(Glat_format_3, data, self)
83
+ self.hasOctaboxes = (self.compression & 1) == 1
84
+ decoder = self.decompileAttributes3
85
+
86
+ gloc = ttFont["Gloc"]
87
+ self.attributes = {}
88
+ count = 0
89
+ for s, e in zip(gloc, gloc[1:]):
90
+ self.attributes[ttFont.getGlyphName(count)] = decoder(data[s:e])
91
+ count += 1
92
+
93
+ def decompileAttributes12(self, data, fmt):
94
+ attributes = _Dict()
95
+ while len(data) > 3:
96
+ e, data = sstruct.unpack2(fmt, data, _Object())
97
+ keys = range(e.attNum, e.attNum + e.num)
98
+ if len(data) >= 2 * e.num:
99
+ vals = struct.unpack_from((">%dh" % e.num), data)
100
+ attributes.update(zip(keys, vals))
101
+ data = data[2 * e.num :]
102
+ return attributes
103
+
104
+ def decompileAttributes3(self, data):
105
+ if self.hasOctaboxes:
106
+ o, data = sstruct.unpack2(Glat_format_3_octabox_metrics, data, _Object())
107
+ numsub = bin(o.subboxBitmap).count("1")
108
+ o.subboxes = []
109
+ for b in range(numsub):
110
+ if len(data) >= 8:
111
+ subbox, data = sstruct.unpack2(
112
+ Glat_format_3_subbox_entry, data, _Object()
113
+ )
114
+ o.subboxes.append(subbox)
115
+ attrs = self.decompileAttributes12(data, Glat_format_23_entry)
116
+ if self.hasOctaboxes:
117
+ attrs.octabox = o
118
+ return attrs
119
+
120
+ def compile(self, ttFont):
121
+ data = sstruct.pack(Glat_format_0, self)
122
+ if self.version <= 1.9:
123
+ encoder = partial(self.compileAttributes12, fmt=Glat_format_1_entry)
124
+ elif self.version <= 2.9:
125
+ encoder = partial(self.compileAttributes12, fmt=Glat_format_1_entry)
126
+ elif self.version >= 3.0:
127
+ self.compression = (self.scheme << 27) + (1 if self.hasOctaboxes else 0)
128
+ data = sstruct.pack(Glat_format_3, self)
129
+ encoder = self.compileAttributes3
130
+
131
+ glocs = []
132
+ for n in range(len(self.attributes)):
133
+ glocs.append(len(data))
134
+ data += encoder(self.attributes[ttFont.getGlyphName(n)])
135
+ glocs.append(len(data))
136
+ ttFont["Gloc"].set(glocs)
137
+
138
+ if self.version >= 3.0:
139
+ data = grUtils.compress(self.scheme, data)
140
+ return data
141
+
142
+ def compileAttributes12(self, attrs, fmt):
143
+ data = b""
144
+ for e in grUtils.entries(attrs):
145
+ data += sstruct.pack(fmt, {"attNum": e[0], "num": e[1]}) + struct.pack(
146
+ (">%dh" % len(e[2])), *e[2]
147
+ )
148
+ return data
149
+
150
+ def compileAttributes3(self, attrs):
151
+ if self.hasOctaboxes:
152
+ o = attrs.octabox
153
+ data = sstruct.pack(Glat_format_3_octabox_metrics, o)
154
+ numsub = bin(o.subboxBitmap).count("1")
155
+ for b in range(numsub):
156
+ data += sstruct.pack(Glat_format_3_subbox_entry, o.subboxes[b])
157
+ else:
158
+ data = ""
159
+ return data + self.compileAttributes12(attrs, Glat_format_23_entry)
160
+
161
+ def toXML(self, writer, ttFont):
162
+ writer.simpletag("version", version=self.version, compressionScheme=self.scheme)
163
+ writer.newline()
164
+ for n, a in sorted(
165
+ self.attributes.items(), key=lambda x: ttFont.getGlyphID(x[0])
166
+ ):
167
+ writer.begintag("glyph", name=n)
168
+ writer.newline()
169
+ if hasattr(a, "octabox"):
170
+ o = a.octabox
171
+ formatstring, names, fixes = sstruct.getformat(
172
+ Glat_format_3_octabox_metrics
173
+ )
174
+ vals = {}
175
+ for k in names:
176
+ if k == "subboxBitmap":
177
+ continue
178
+ vals[k] = "{:.3f}%".format(getattr(o, k) * 100.0 / 255)
179
+ vals["bitmap"] = "{:0X}".format(o.subboxBitmap)
180
+ writer.begintag("octaboxes", **vals)
181
+ writer.newline()
182
+ formatstring, names, fixes = sstruct.getformat(
183
+ Glat_format_3_subbox_entry
184
+ )
185
+ for s in o.subboxes:
186
+ vals = {}
187
+ for k in names:
188
+ vals[k] = "{:.3f}%".format(getattr(s, k) * 100.0 / 255)
189
+ writer.simpletag("octabox", **vals)
190
+ writer.newline()
191
+ writer.endtag("octaboxes")
192
+ writer.newline()
193
+ for k, v in sorted(a.items()):
194
+ writer.simpletag("attribute", index=k, value=v)
195
+ writer.newline()
196
+ writer.endtag("glyph")
197
+ writer.newline()
198
+
199
+ def fromXML(self, name, attrs, content, ttFont):
200
+ if name == "version":
201
+ self.version = float(safeEval(attrs["version"]))
202
+ self.scheme = int(safeEval(attrs["compressionScheme"]))
203
+ if name != "glyph":
204
+ return
205
+ if not hasattr(self, "attributes"):
206
+ self.attributes = {}
207
+ gname = attrs["name"]
208
+ attributes = _Dict()
209
+ for element in content:
210
+ if not isinstance(element, tuple):
211
+ continue
212
+ tag, attrs, subcontent = element
213
+ if tag == "attribute":
214
+ k = int(safeEval(attrs["index"]))
215
+ v = int(safeEval(attrs["value"]))
216
+ attributes[k] = v
217
+ elif tag == "octaboxes":
218
+ self.hasOctaboxes = True
219
+ o = _Object()
220
+ o.subboxBitmap = int(attrs["bitmap"], 16)
221
+ o.subboxes = []
222
+ del attrs["bitmap"]
223
+ for k, v in attrs.items():
224
+ setattr(o, k, int(float(v[:-1]) * 255.0 / 100.0 + 0.5))
225
+ for element in subcontent:
226
+ if not isinstance(element, tuple):
227
+ continue
228
+ (tag, attrs, subcontent) = element
229
+ so = _Object()
230
+ for k, v in attrs.items():
231
+ setattr(so, k, int(float(v[:-1]) * 255.0 / 100.0 + 0.5))
232
+ o.subboxes.append(so)
233
+ attributes.octabox = o
234
+ self.attributes[gname] = attributes
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/J_S_T_F_.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from .otBase import BaseTTXConverter
2
+
3
+
4
class table_J_S_T_F_(BaseTTXConverter):
    """The ``JSTF`` (justification) table.

    All (de)compilation and TTX serialization is inherited from
    BaseTTXConverter, driven by the shared otData table descriptions.
    """

    pass
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/S_I_N_G_.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fontTools.misc import sstruct
2
+ from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval
3
+ from . import DefaultTable
4
+
5
# sstruct description of the fixed-size portion of the SING table header.
SINGFormat = """
		>	# big endian
		tableVersionMajor:	H
		tableVersionMinor: 	H
		glyphletVersion:	H
		permissions:		h
		mainGID:			H
		unitsPerEm:			H
		vertAdvance:		h
		vertOrigin:			h
		uniqueName:			28s
		METAMD5:			16s
		nameLength:			1s
"""
# baseGlyphName is a byte string which follows the record above.
20
+
21
+
22
class table_S_I_N_G_(DefaultTable.DefaultTable):
    """The ``SING`` glyphlet table."""

    dependencies = []

    def decompile(self, data, ttFont):
        """Unpack the fixed header, then the trailing baseGlyphName bytes."""
        dummy, rest = sstruct.unpack2(SINGFormat, data, self)
        self.uniqueName = self.decompileUniqueName(self.uniqueName)
        self.nameLength = byteord(self.nameLength)
        assert len(rest) == self.nameLength
        self.baseGlyphName = tostr(rest)

        # Render the 16-byte MD5 as a textual list of hex ints so it
        # round-trips through TTX as readable text.
        rawMETAMD5 = self.METAMD5
        self.METAMD5 = "[" + hex(byteord(self.METAMD5[0]))
        for char in rawMETAMD5[1:]:
            self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char))
        self.METAMD5 = self.METAMD5 + "]"

    def decompileUniqueName(self, data):
        """Decode the NUL-terminated uniqueName field, escaping any
        non-printable byte as a 3-digit octal escape (e.g. ``\\377``)."""
        name = ""
        for char in data:
            val = byteord(char)
            if val == 0:
                break
            if 31 < val < 128:
                # BUGFIX: was ``(val > 31) or (val < 128)``, which is always
                # true, so the escape branch below was unreachable.
                name += chr(val)
            else:
                # BUGFIX: the old code discarded the zfill() result and
                # mishandled Python 3's "0o" prefix; format() emits exactly
                # three octal digits.
                name += "\\" + format(val, "03o")
        return name

    def compile(self, ttFont):
        d = self.__dict__.copy()
        d["nameLength"] = bytechr(len(self.baseGlyphName))
        d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28)
        # METAMD5 is the textual list produced by decompile(); safeEval
        # parses literals only, unlike eval() which would execute arbitrary
        # expressions coming from a TTX file.
        METAMD5List = safeEval(self.METAMD5)
        d["METAMD5"] = b""
        for val in METAMD5List:
            d["METAMD5"] += bytechr(val)
        assert len(d["METAMD5"]) == 16, "Failed to pack 16 byte MD5 hash in SING table"
        data = sstruct.pack(SINGFormat, d)
        data = data + tobytes(self.baseGlyphName)
        return data

    def compilecompileUniqueName(self, name, length):
        """Truncate *name* (NUL-terminated) or NUL-pad it to *length* chars."""
        nameLen = len(name)
        if length <= nameLen:
            name = name[: length - 1] + "\000"
        else:
            # BUGFIX: was ``(nameLen - length)`` — a negative repeat count
            # that produced no padding at all.
            name += (length - nameLen) * "\000"
        return name

    def toXML(self, writer, ttFont):
        writer.comment("Most of this table will be recalculated by the compiler")
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(SINGFormat)
        for name in names:
            value = getattr(self, name)
            writer.simpletag(name, value=value)
            writer.newline()
        writer.simpletag("baseGlyphName", value=self.baseGlyphName)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        value = attrs["value"]
        # String-valued fields are stored verbatim; the rest are literals.
        if name in ["uniqueName", "METAMD5", "baseGlyphName"]:
            setattr(self, name, value)
        else:
            setattr(self, name, safeEval(value))
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/S_T_A_T_.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from .otBase import BaseTTXConverter
2
+
3
+
4
class table_S_T_A_T_(BaseTTXConverter):
    """The ``STAT`` (style attributes) table.

    All behavior is inherited from BaseTTXConverter via the shared otData
    table descriptions.
    """

    pass
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_B_.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from .T_S_I_V_ import table_T_S_I_V_
2
+
3
+
4
class table_T_S_I_B_(table_T_S_I_V_):
    """VTT private ``TSIB`` table; behaves exactly like the generic TSIV
    text table it inherits from."""

    pass
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_D_.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from .T_S_I_V_ import table_T_S_I_V_
2
+
3
+
4
class table_T_S_I_D_(table_T_S_I_V_):
    """VTT private ``TSID`` table; behaves exactly like the generic TSIV
    text table it inherits from."""

    pass
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_P_.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from .T_S_I_V_ import table_T_S_I_V_
2
+
3
+
4
class table_T_S_I_P_(table_T_S_I_V_):
    """VTT private ``TSIP`` table; behaves exactly like the generic TSIV
    text table it inherits from."""

    pass
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__1.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
2
+ tool to store its hinting source data.
3
+
4
+ TSI1 contains the text of the glyph programs in the form of low-level assembly
5
+ code, as well as the 'extra' programs 'fpgm', 'ppgm' (i.e. 'prep'), and 'cvt'.
6
+ """
7
+
8
+ from . import DefaultTable
9
+ from fontTools.misc.loggingTools import LogMixin
10
+ from fontTools.misc.textTools import strjoin, tobytes, tostr
11
+
12
+
13
class table_T_S_I__1(LogMixin, DefaultTable.DefaultTable):
    """The ``TSI1`` table, holding VTT glyph-program assembly source.

    Records are located through the companion index table (``TSI0``):
    glyph programs are keyed by glyph ID, and the 'extra' programs by the
    pseudo glyph IDs in ``extras`` below.
    """

    # Pseudo glyph IDs used in the index table for the non-glyph programs.
    extras = {0xFFFA: "ppgm", 0xFFFB: "cvt", 0xFFFC: "reserved", 0xFFFD: "fpgm"}

    indextable = "TSI0"

    def decompile(self, data, ttFont):
        """Split *data* into per-glyph and 'extra' program texts using the
        (glyphID, textLength, textOffset) records from the index table."""
        totalLength = len(data)
        indextable = ttFont[self.indextable]
        for indices, isExtra in zip(
            (indextable.indices, indextable.extra_indices), (False, True)
        ):
            programs = {}
            for i, (glyphID, textLength, textOffset) in enumerate(indices):
                if isExtra:
                    name = self.extras[glyphID]
                else:
                    name = ttFont.getGlyphName(glyphID)
                if textOffset > totalLength:
                    self.log.warning("textOffset > totalLength; %r skipped" % name)
                    continue
                if textLength < 0x8000:
                    # If the length stored in the record is less than 32768, then use
                    # that as the length of the record.
                    pass
                elif textLength == 0x8000:
                    # If the length is 32768, compute the actual length as follows:
                    isLast = i == (len(indices) - 1)
                    if isLast:
                        if isExtra:
                            # For the last "extra" record (the very last record of the
                            # table), the length is the difference between the total
                            # length of the TSI1 table and the textOffset of the final
                            # record.
                            nextTextOffset = totalLength
                        else:
                            # For the last "normal" record (the last record just prior
                            # to the record containing the "magic number"), the length
                            # is the difference between the textOffset of the record
                            # following the "magic number" (0xFFFE) record (i.e. the
                            # first "extra" record), and the textOffset of the last
                            # "normal" record.
                            nextTextOffset = indextable.extra_indices[0][2]
                    else:
                        # For all other records with a length of 0x8000, the length is
                        # the difference between the textOffset of the record in
                        # question and the textOffset of the next record.
                        nextTextOffset = indices[i + 1][2]
                    assert nextTextOffset >= textOffset, "entries not sorted by offset"
                    if nextTextOffset > totalLength:
                        self.log.warning(
                            "nextTextOffset > totalLength; %r truncated" % name
                        )
                        nextTextOffset = totalLength
                    textLength = nextTextOffset - textOffset
                else:
                    from fontTools import ttLib

                    raise ttLib.TTLibError(
                        "%r textLength (%d) must not be > 32768" % (name, textLength)
                    )
                text = data[textOffset : textOffset + textLength]
                assert len(text) == textLength
                text = tostr(text, encoding="utf-8")
                if text:
                    programs[name] = text
            if isExtra:
                self.extraPrograms = programs
            else:
                self.glyphPrograms = programs

    def _compilePrograms(self, keyedNames, programs, data):
        """Append each named program's UTF-8 text to *data*, 2-byte aligned.

        *keyedNames* yields (key, name) pairs, where *key* is the value to
        store in the index record (glyph ID or extra code). Returns the list
        of (key, textLength, textOffset) records and the grown data.
        """
        indices = []
        for key, name in keyedNames:
            if len(data) % 2:
                # align on 2-byte boundaries, fill with return chars. Yum.
                data += b"\015"
            if name in programs:
                text = tobytes(programs[name], encoding="utf-8")
            else:
                text = b""
            textLength = len(text)
            if textLength >= 0x8000:
                # Oversized records store the 0x8000 sentinel; the real
                # length is recovered from the offsets at decompile time.
                textLength = 0x8000
            indices.append((key, textLength, len(data)))
            data += text
        return indices, data

    def compile(self, ttFont):
        """Serialize all programs and install the index records in TSI0."""
        if not hasattr(self, "glyphPrograms"):
            self.glyphPrograms = {}
            self.extraPrograms = {}
        glyphNames = ttFont.getGlyphOrder()
        # Glyph programs come first, then the 'extra' programs sorted by code.
        indices, data = self._compilePrograms(
            enumerate(glyphNames), self.glyphPrograms, b""
        )
        extra_indices, data = self._compilePrograms(
            sorted(self.extras.items()), self.extraPrograms, data
        )
        ttFont[self.indextable].set(indices, extra_indices)
        return data

    def toXML(self, writer, ttFont):
        names = sorted(self.glyphPrograms.keys())
        writer.newline()
        for name in names:
            text = self.glyphPrograms[name]
            if not text:
                continue
            writer.begintag("glyphProgram", name=name)
            writer.newline()
            writer.write_noindent(text.replace("\r", "\n"))
            writer.newline()
            writer.endtag("glyphProgram")
            writer.newline()
            writer.newline()
        extra_names = sorted(self.extraPrograms.keys())
        for name in extra_names:
            text = self.extraPrograms[name]
            if not text:
                continue
            writer.begintag("extraProgram", name=name)
            writer.newline()
            writer.write_noindent(text.replace("\r", "\n"))
            writer.newline()
            writer.endtag("extraProgram")
            writer.newline()
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if not hasattr(self, "glyphPrograms"):
            self.glyphPrograms = {}
            self.extraPrograms = {}
        # VTT sources use CR line endings internally; XML carries LF.
        lines = strjoin(content).replace("\r", "\n").split("\n")
        text = "\r".join(lines[1:-1])
        if name == "glyphProgram":
            self.glyphPrograms[attrs["name"]] = text
        elif name == "extraProgram":
            self.extraPrograms[attrs["name"]] = text
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__5.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
2
+ tool to store its hinting source data.
3
+
4
+ TSI5 contains the VTT character groups.
5
+ """
6
+
7
+ from fontTools.misc.textTools import safeEval
8
+ from . import DefaultTable
9
+ import sys
10
+ import array
11
+
12
+
13
class table_T_S_I__5(DefaultTable.DefaultTable):
    """The ``TSI5`` table, mapping every glyph to its VTT character group."""

    def decompile(self, data, ttFont):
        """Read one big-endian uint16 group number per glyph."""
        numGlyphs = ttFont["maxp"].numGlyphs
        assert len(data) == 2 * numGlyphs
        groups = array.array("H")
        groups.frombytes(data)
        if sys.byteorder != "big":
            groups.byteswap()
        self.glyphGrouping = {
            ttFont.getGlyphName(gid): group for gid, group in enumerate(groups)
        }

    def compile(self, ttFont):
        """Pack the group numbers in glyph order; unknown glyphs get 0."""
        grouping = self.glyphGrouping
        packed = array.array(
            "H", (grouping.get(glyphName, 0) for glyphName in ttFont.getGlyphOrder())
        )
        if sys.byteorder != "big":
            packed.byteswap()
        return packed.tobytes()

    def toXML(self, writer, ttFont):
        for glyphName in sorted(self.glyphGrouping):
            writer.simpletag(
                "glyphgroup", name=glyphName, value=self.glyphGrouping[glyphName]
            )
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if not hasattr(self, "glyphGrouping"):
            self.glyphGrouping = {}
        if name != "glyphgroup":
            return
        self.glyphGrouping[attrs["name"]] = safeEval(attrs["value"])
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/TupleVariation.py ADDED
@@ -0,0 +1,846 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fontTools.misc.fixedTools import (
2
+ fixedToFloat as fi2fl,
3
+ floatToFixed as fl2fi,
4
+ floatToFixedToStr as fl2str,
5
+ strToFixedToFloat as str2fl,
6
+ otRound,
7
+ )
8
+ from fontTools.misc.textTools import safeEval
9
+ import array
10
+ from collections import Counter, defaultdict
11
+ import io
12
+ import logging
13
+ import struct
14
+ import sys
15
+
16
+
17
# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm

# Flags in a tupleVariationHeader's tupleIndex field (see spec link above).
EMBEDDED_PEAK_TUPLE = 0x8000
INTERMEDIATE_REGION = 0x4000
PRIVATE_POINT_NUMBERS = 0x2000

# Flags/masks in a packed-delta run's header byte.
DELTAS_ARE_ZERO = 0x80
DELTAS_ARE_WORDS = 0x40
DELTAS_ARE_LONGS = 0xC0
DELTAS_SIZE_MASK = 0xC0
DELTA_RUN_COUNT_MASK = 0x3F

# Flags/masks in a packed point-number run's header byte.
POINTS_ARE_WORDS = 0x80
POINT_RUN_COUNT_MASK = 0x7F

# Flags/masks in a tupleVariationCount field.
TUPLES_SHARE_POINT_NUMBERS = 0x8000
TUPLE_COUNT_MASK = 0x0FFF
TUPLE_INDEX_MASK = 0x0FFF

log = logging.getLogger(__name__)
37
+
38
+
39
+ class TupleVariation(object):
40
    def __init__(self, axes, coordinates):
        # Copy both inputs so later in-place mutation does not leak back to
        # the caller. ``axes`` maps axisTag -> (min, peak, max) triples (see
        # compileCoord/fromXML); ``coordinates`` holds per-point deltas,
        # with None marking unreferenced points.
        self.axes = axes.copy()
        self.coordinates = list(coordinates)
43
+
44
+ def __repr__(self):
45
+ axes = ",".join(
46
+ sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()])
47
+ )
48
+ return "<TupleVariation %s %s>" % (axes, self.coordinates)
49
+
50
+ def __eq__(self, other):
51
+ return self.coordinates == other.coordinates and self.axes == other.axes
52
+
53
+ def getUsedPoints(self):
54
+ # Empty set means "all points used".
55
+ if None not in self.coordinates:
56
+ return frozenset()
57
+ used = frozenset([i for i, p in enumerate(self.coordinates) if p is not None])
58
+ # Return None if no points used.
59
+ return used if used else None
60
+
61
+ def hasImpact(self):
62
+ """Returns True if this TupleVariation has any visible impact.
63
+
64
+ If the result is False, the TupleVariation can be omitted from the font
65
+ without making any visible difference.
66
+ """
67
+ return any(c is not None for c in self.coordinates)
68
+
69
    def toXML(self, writer, axisTags):
        """Write this variation as a <tuple> element: one <coord> per axis
        present, then one <delta> per non-None coordinate.

        A coord whose min/max equal the defaults implied by its peak is
        written in the short single-value form.
        """
        writer.begintag("tuple")
        writer.newline()
        for axis in axisTags:
            value = self.axes.get(axis)
            if value is not None:
                minValue, value, maxValue = value
                defaultMinValue = min(value, 0.0)  # -0.3 --> -0.3; 0.7 --> 0.0
                defaultMaxValue = max(value, 0.0)  # -0.3 --> 0.0; 0.7 --> 0.7
                if minValue == defaultMinValue and maxValue == defaultMaxValue:
                    writer.simpletag("coord", axis=axis, value=fl2str(value, 14))
                else:
                    attrs = [
                        ("axis", axis),
                        ("min", fl2str(minValue, 14)),
                        ("value", fl2str(value, 14)),
                        ("max", fl2str(maxValue, 14)),
                    ]
                    writer.simpletag("coord", attrs)
                writer.newline()
        wrote_any_deltas = False
        for i, delta in enumerate(self.coordinates):
            if type(delta) == tuple and len(delta) == 2:
                # gvar-style point delta: an (x, y) pair.
                writer.simpletag("delta", pt=i, x=delta[0], y=delta[1])
                writer.newline()
                wrote_any_deltas = True
            elif type(delta) == int:
                # cvar-style CVT delta: a single int.
                writer.simpletag("delta", cvt=i, value=delta)
                writer.newline()
                wrote_any_deltas = True
            elif delta is not None:
                log.error("bad delta format")
                writer.comment("bad delta #%d" % i)
                writer.newline()
                wrote_any_deltas = True
        if not wrote_any_deltas:
            writer.comment("no deltas")
            writer.newline()
        writer.endtag("tuple")
        writer.newline()
109
+
110
    def fromXML(self, name, attrs, _content):
        """Parse a <coord> or <delta> child element (inverse of toXML)."""
        if name == "coord":
            axis = attrs["axis"]
            value = str2fl(attrs["value"], 14)
            defaultMinValue = min(value, 0.0)  # -0.3 --> -0.3; 0.7 --> 0.0
            defaultMaxValue = max(value, 0.0)  # -0.3 --> 0.0; 0.7 --> 0.7
            # min/max are optional in the short form; fall back to defaults.
            minValue = str2fl(attrs.get("min", defaultMinValue), 14)
            maxValue = str2fl(attrs.get("max", defaultMaxValue), 14)
            self.axes[axis] = (minValue, value, maxValue)
        elif name == "delta":
            if "pt" in attrs:
                # gvar-style point delta.
                point = safeEval(attrs["pt"])
                x = safeEval(attrs["x"])
                y = safeEval(attrs["y"])
                self.coordinates[point] = (x, y)
            elif "cvt" in attrs:
                # cvar-style CVT delta.
                cvt = safeEval(attrs["cvt"])
                value = safeEval(attrs["value"])
                self.coordinates[cvt] = value
            else:
                log.warning("bad delta format: %s" % ", ".join(sorted(attrs.keys())))
131
+
132
    def compile(self, axisTags, sharedCoordIndices={}, pointData=None):
        """Compile to (tupleData, auxData).

        tupleData is the tupleVariationHeader (size + flags, optional
        embedded peak and intermediate tuples); auxData is the serialized
        private point numbers (if any) followed by the packed deltas.
        NOTE(review): the mutable default for sharedCoordIndices is only
        ever read (via .get), so it is harmless here.
        """
        assert set(self.axes.keys()) <= set(axisTags), (
            "Unknown axis tag found.",
            self.axes.keys(),
            axisTags,
        )

        tupleData = []
        auxData = []

        if pointData is None:
            usedPoints = self.getUsedPoints()
            if usedPoints is None:  # Nothing to encode
                return b"", b""
            pointData = self.compilePoints(usedPoints)

        coord = self.compileCoord(axisTags)
        # Reuse a shared peak tuple when one matches; otherwise embed ours.
        flags = sharedCoordIndices.get(coord)
        if flags is None:
            flags = EMBEDDED_PEAK_TUPLE
            tupleData.append(coord)

        intermediateCoord = self.compileIntermediateCoord(axisTags)
        if intermediateCoord is not None:
            flags |= INTERMEDIATE_REGION
            tupleData.append(intermediateCoord)

        # pointData of b'' implies "use shared points".
        if pointData:
            flags |= PRIVATE_POINT_NUMBERS
            auxData.append(pointData)

        auxData.append(self.compileDeltas())
        auxData = b"".join(auxData)

        tupleData.insert(0, struct.pack(">HH", len(auxData), flags))
        return b"".join(tupleData), auxData
169
+
170
+ def compileCoord(self, axisTags):
171
+ result = []
172
+ axes = self.axes
173
+ for axis in axisTags:
174
+ triple = axes.get(axis)
175
+ if triple is None:
176
+ result.append(b"\0\0")
177
+ else:
178
+ result.append(struct.pack(">h", fl2fi(triple[1], 14)))
179
+ return b"".join(result)
180
+
181
+ def compileIntermediateCoord(self, axisTags):
182
+ needed = False
183
+ for axis in axisTags:
184
+ minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
185
+ defaultMinValue = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
186
+ defaultMaxValue = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
187
+ if (minValue != defaultMinValue) or (maxValue != defaultMaxValue):
188
+ needed = True
189
+ break
190
+ if not needed:
191
+ return None
192
+ minCoords = []
193
+ maxCoords = []
194
+ for axis in axisTags:
195
+ minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
196
+ minCoords.append(struct.pack(">h", fl2fi(minValue, 14)))
197
+ maxCoords.append(struct.pack(">h", fl2fi(maxValue, 14)))
198
+ return b"".join(minCoords + maxCoords)
199
+
200
    @staticmethod
    def decompileCoord_(axisTags, data, offset):
        # Read one big-endian F2Dot14 per axis starting at *offset*;
        # returns ({axisTag: float}, newOffset).
        coord = {}
        pos = offset
        for axis in axisTags:
            coord[axis] = fi2fl(struct.unpack(">h", data[pos : pos + 2])[0], 14)
            pos += 2
        return coord, pos
208
+
209
    @staticmethod
    def compilePoints(points):
        """Pack a set of point numbers into the gvar/cvar run encoding.

        Returns a bytes-like object: a 1- or 2-byte count followed by
        delta-encoded runs of byte- or word-sized values.
        """
        # If the set consists of all points in the glyph, it gets encoded with
        # a special encoding: a single zero byte.
        #
        # To use this optimization, points passed in must be empty set.
        # The following two lines are not strictly necessary as the main code
        # below would emit the same. But this is most common and faster.
        if not points:
            return b"\0"

        # In the 'gvar' table, the packing of point numbers is a little surprising.
        # It consists of multiple runs, each being a delta-encoded list of integers.
        # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as
        # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1.
        # There are two types of runs, with values being either 8 or 16 bit unsigned
        # integers.
        points = list(points)
        points.sort()
        numPoints = len(points)

        result = bytearray()
        # The binary representation starts with the total number of points in the set,
        # encoded into one or two bytes depending on the value.
        if numPoints < 0x80:
            result.append(numPoints)
        else:
            result.append((numPoints >> 8) | 0x80)
            result.append(numPoints & 0xFF)

        MAX_RUN_LENGTH = 127
        pos = 0
        lastValue = 0
        while pos < numPoints:
            runLength = 0

            # Reserve the run's header byte; patched once the run is known.
            headerPos = len(result)
            result.append(0)

            useByteEncoding = None
            while pos < numPoints and runLength <= MAX_RUN_LENGTH:
                curValue = points[pos]
                delta = curValue - lastValue
                if useByteEncoding is None:
                    # First delta of the run decides byte vs word encoding.
                    useByteEncoding = 0 <= delta <= 0xFF
                if useByteEncoding and (delta > 0xFF or delta < 0):
                    # we need to start a new run (which will not use byte encoding)
                    break
                # TODO This never switches back to a byte-encoding from a short-encoding.
                # That's suboptimal.
                if useByteEncoding:
                    result.append(delta)
                else:
                    result.append(delta >> 8)
                    result.append(delta & 0xFF)
                lastValue = curValue
                pos += 1
                runLength += 1
            if useByteEncoding:
                result[headerPos] = runLength - 1
            else:
                result[headerPos] = (runLength - 1) | POINTS_ARE_WORDS

        return result
273
+
274
    @staticmethod
    def decompilePoints_(numPoints, data, offset, tableTag):
        """(numPoints, data, offset, tableTag) --> ([point1, point2, ...], newOffset)"""
        assert tableTag in ("cvar", "gvar")
        pos = offset
        numPointsInData = data[pos]
        pos += 1
        if (numPointsInData & POINTS_ARE_WORDS) != 0:
            # High bit set: the point count spans two bytes.
            numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | data[pos]
            pos += 1
        if numPointsInData == 0:
            # A stored count of zero means "all points in the glyph".
            return (range(numPoints), pos)

        result = []
        while len(result) < numPointsInData:
            runHeader = data[pos]
            pos += 1
            numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1
            point = 0
            if (runHeader & POINTS_ARE_WORDS) != 0:
                points = array.array("H")
                pointsSize = numPointsInRun * 2
            else:
                points = array.array("B")
                pointsSize = numPointsInRun
            points.frombytes(data[pos : pos + pointsSize])
            if sys.byteorder != "big":
                points.byteswap()

            assert len(points) == numPointsInRun
            pos += pointsSize

            result.extend(points)

        # Convert relative to absolute
        absolute = []
        current = 0
        for delta in result:
            current += delta
            absolute.append(current)
        result = absolute
        del absolute

        # Out-of-range points are reported but kept, matching lenient parsing.
        badPoints = {str(p) for p in result if p < 0 or p >= numPoints}
        if badPoints:
            log.warning(
                "point %s out of range in '%s' table"
                % (",".join(sorted(badPoints)), tableTag)
            )
        return (result, pos)
324
+
325
    def compileDeltas(self):
        """Serialize the deltas of all used (non-None) points as packed
        delta runs; for 2-tuple deltas all x values precede all y values."""
        deltaX = []
        deltaY = []
        # getCoordWidth() distinguishes gvar-style (x, y) tuples (width 2)
        # from cvar-style scalars — assumed defined elsewhere on this class.
        if self.getCoordWidth() == 2:
            for c in self.coordinates:
                if c is None:
                    continue
                deltaX.append(c[0])
                deltaY.append(c[1])
        else:
            for c in self.coordinates:
                if c is None:
                    continue
                deltaX.append(c)
        bytearr = bytearray()
        self.compileDeltaValues_(deltaX, bytearr)
        # deltaY is empty in the scalar case, so this appends nothing then.
        self.compileDeltaValues_(deltaY, bytearr)
        return bytearr
343
+
344
    @staticmethod
    def compileDeltaValues_(deltas, bytearr=None):
        """[value1, value2, value3, ...] --> bytearray

        Emits a sequence of runs. Each run starts with a
        byte-sized header whose 6 least significant bits
        (header & 0x3F) indicate how many values are encoded
        in this run. The stored length is the actual length
        minus one; run lengths are thus in the range [1..64].
        If the header byte has its most significant bit (0x80)
        set, all values in this run are zero, and no data
        follows. Otherwise, the header byte is followed by
        ((header & 0x3F) + 1) signed values. If (header &
        0x40) is clear, the delta values are stored as signed
        bytes; if (header & 0x40) is set, the delta values are
        signed 16-bit integers.
        """  # Explaining the format because the 'gvar' spec is hard to understand.
        if bytearr is None:
            bytearr = bytearray()
        pos = 0
        numDeltas = len(deltas)
        # Dispatch to the narrowest encoder that fits the current value;
        # each encoder consumes a maximal run and returns the new position.
        while pos < numDeltas:
            value = deltas[pos]
            if value == 0:
                pos = TupleVariation.encodeDeltaRunAsZeroes_(deltas, pos, bytearr)
            elif -128 <= value <= 127:
                pos = TupleVariation.encodeDeltaRunAsBytes_(deltas, pos, bytearr)
            elif -32768 <= value <= 32767:
                pos = TupleVariation.encodeDeltaRunAsWords_(deltas, pos, bytearr)
            else:
                pos = TupleVariation.encodeDeltaRunAsLongs_(deltas, pos, bytearr)
        return bytearr
376
+
377
+ @staticmethod
378
+ def encodeDeltaRunAsZeroes_(deltas, offset, bytearr):
379
+ pos = offset
380
+ numDeltas = len(deltas)
381
+ while pos < numDeltas and deltas[pos] == 0:
382
+ pos += 1
383
+ runLength = pos - offset
384
+ while runLength >= 64:
385
+ bytearr.append(DELTAS_ARE_ZERO | 63)
386
+ runLength -= 64
387
+ if runLength:
388
+ bytearr.append(DELTAS_ARE_ZERO | (runLength - 1))
389
+ return pos
390
+
391
+ @staticmethod
392
+ def encodeDeltaRunAsBytes_(deltas, offset, bytearr):
393
+ pos = offset
394
+ numDeltas = len(deltas)
395
+ while pos < numDeltas:
396
+ value = deltas[pos]
397
+ if not (-128 <= value <= 127):
398
+ break
399
+ # Within a byte-encoded run of deltas, a single zero
400
+ # is best stored literally as 0x00 value. However,
401
+ # if are two or more zeroes in a sequence, it is
402
+ # better to start a new run. For example, the sequence
403
+ # of deltas [15, 15, 0, 15, 15] becomes 6 bytes
404
+ # (04 0F 0F 00 0F 0F) when storing the zero value
405
+ # literally, but 7 bytes (01 0F 0F 80 01 0F 0F)
406
+ # when starting a new run.
407
+ if value == 0 and pos + 1 < numDeltas and deltas[pos + 1] == 0:
408
+ break
409
+ pos += 1
410
+ runLength = pos - offset
411
+ while runLength >= 64:
412
+ bytearr.append(63)
413
+ bytearr.extend(array.array("b", deltas[offset : offset + 64]))
414
+ offset += 64
415
+ runLength -= 64
416
+ if runLength:
417
+ bytearr.append(runLength - 1)
418
+ bytearr.extend(array.array("b", deltas[offset:pos]))
419
+ return pos
420
+
421
+ @staticmethod
422
+ def encodeDeltaRunAsWords_(deltas, offset, bytearr):
423
+ pos = offset
424
+ numDeltas = len(deltas)
425
+ while pos < numDeltas:
426
+ value = deltas[pos]
427
+
428
+ # Within a word-encoded run of deltas, it is easiest
429
+ # to start a new run (with a different encoding)
430
+ # whenever we encounter a zero value. For example,
431
+ # the sequence [0x6666, 0, 0x7777] needs 7 bytes when
432
+ # storing the zero literally (42 66 66 00 00 77 77),
433
+ # and equally 7 bytes when starting a new run
434
+ # (40 66 66 80 40 77 77).
435
+ if value == 0:
436
+ break
437
+
438
+ # Within a word-encoded run of deltas, a single value
439
+ # in the range (-128..127) should be encoded literally
440
+ # because it is more compact. For example, the sequence
441
+ # [0x6666, 2, 0x7777] becomes 7 bytes when storing
442
+ # the value literally (42 66 66 00 02 77 77), but 8 bytes
443
+ # when starting a new run (40 66 66 00 02 40 77 77).
444
+ if (
445
+ (-128 <= value <= 127)
446
+ and pos + 1 < numDeltas
447
+ and (-128 <= deltas[pos + 1] <= 127)
448
+ ):
449
+ break
450
+
451
+ if not (-32768 <= value <= 32767):
452
+ break
453
+
454
+ pos += 1
455
+ runLength = pos - offset
456
+ while runLength >= 64:
457
+ bytearr.append(DELTAS_ARE_WORDS | 63)
458
+ a = array.array("h", deltas[offset : offset + 64])
459
+ if sys.byteorder != "big":
460
+ a.byteswap()
461
+ bytearr.extend(a)
462
+ offset += 64
463
+ runLength -= 64
464
+ if runLength:
465
+ bytearr.append(DELTAS_ARE_WORDS | (runLength - 1))
466
+ a = array.array("h", deltas[offset:pos])
467
+ if sys.byteorder != "big":
468
+ a.byteswap()
469
+ bytearr.extend(a)
470
+ return pos
471
+
472
+ @staticmethod
473
+ def encodeDeltaRunAsLongs_(deltas, offset, bytearr):
474
+ pos = offset
475
+ numDeltas = len(deltas)
476
+ while pos < numDeltas:
477
+ value = deltas[pos]
478
+ if -32768 <= value <= 32767:
479
+ break
480
+ pos += 1
481
+ runLength = pos - offset
482
+ while runLength >= 64:
483
+ bytearr.append(DELTAS_ARE_LONGS | 63)
484
+ a = array.array("i", deltas[offset : offset + 64])
485
+ if sys.byteorder != "big":
486
+ a.byteswap()
487
+ bytearr.extend(a)
488
+ offset += 64
489
+ runLength -= 64
490
+ if runLength:
491
+ bytearr.append(DELTAS_ARE_LONGS | (runLength - 1))
492
+ a = array.array("i", deltas[offset:pos])
493
+ if sys.byteorder != "big":
494
+ a.byteswap()
495
+ bytearr.extend(a)
496
+ return pos
497
+
498
+ @staticmethod
499
+ def decompileDeltas_(numDeltas, data, offset=0):
500
+ """(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)"""
501
+ result = []
502
+ pos = offset
503
+ while len(result) < numDeltas if numDeltas is not None else pos < len(data):
504
+ runHeader = data[pos]
505
+ pos += 1
506
+ numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1
507
+ if (runHeader & DELTAS_SIZE_MASK) == DELTAS_ARE_ZERO:
508
+ result.extend([0] * numDeltasInRun)
509
+ else:
510
+ if (runHeader & DELTAS_SIZE_MASK) == DELTAS_ARE_LONGS:
511
+ deltas = array.array("i")
512
+ deltasSize = numDeltasInRun * 4
513
+ elif (runHeader & DELTAS_SIZE_MASK) == DELTAS_ARE_WORDS:
514
+ deltas = array.array("h")
515
+ deltasSize = numDeltasInRun * 2
516
+ else:
517
+ deltas = array.array("b")
518
+ deltasSize = numDeltasInRun
519
+ deltas.frombytes(data[pos : pos + deltasSize])
520
+ if sys.byteorder != "big":
521
+ deltas.byteswap()
522
+ assert len(deltas) == numDeltasInRun, (len(deltas), numDeltasInRun)
523
+ pos += deltasSize
524
+ result.extend(deltas)
525
+ assert numDeltas is None or len(result) == numDeltas
526
+ return (result, pos)
527
+
528
+ @staticmethod
529
+ def getTupleSize_(flags, axisCount):
530
+ size = 4
531
+ if (flags & EMBEDDED_PEAK_TUPLE) != 0:
532
+ size += axisCount * 2
533
+ if (flags & INTERMEDIATE_REGION) != 0:
534
+ size += axisCount * 4
535
+ return size
536
+
537
+ def getCoordWidth(self):
538
+ """Return 2 if coordinates are (x, y) as in gvar, 1 if single values
539
+ as in cvar, or 0 if empty.
540
+ """
541
+ firstDelta = next((c for c in self.coordinates if c is not None), None)
542
+ if firstDelta is None:
543
+ return 0 # empty or has no impact
544
+ if type(firstDelta) in (int, float):
545
+ return 1
546
+ if type(firstDelta) is tuple and len(firstDelta) == 2:
547
+ return 2
548
+ raise TypeError(
549
+ "invalid type of delta; expected (int or float) number, or "
550
+ "Tuple[number, number]: %r" % firstDelta
551
+ )
552
+
553
+ def scaleDeltas(self, scalar):
554
+ if scalar == 1.0:
555
+ return # no change
556
+ coordWidth = self.getCoordWidth()
557
+ self.coordinates = [
558
+ (
559
+ None
560
+ if d is None
561
+ else d * scalar if coordWidth == 1 else (d[0] * scalar, d[1] * scalar)
562
+ )
563
+ for d in self.coordinates
564
+ ]
565
+
566
+ def roundDeltas(self):
567
+ coordWidth = self.getCoordWidth()
568
+ self.coordinates = [
569
+ (
570
+ None
571
+ if d is None
572
+ else otRound(d) if coordWidth == 1 else (otRound(d[0]), otRound(d[1]))
573
+ )
574
+ for d in self.coordinates
575
+ ]
576
+
577
+ def calcInferredDeltas(self, origCoords, endPts):
578
+ from fontTools.varLib.iup import iup_delta
579
+
580
+ if self.getCoordWidth() == 1:
581
+ raise TypeError("Only 'gvar' TupleVariation can have inferred deltas")
582
+ if None in self.coordinates:
583
+ if len(self.coordinates) != len(origCoords):
584
+ raise ValueError(
585
+ "Expected len(origCoords) == %d; found %d"
586
+ % (len(self.coordinates), len(origCoords))
587
+ )
588
+ self.coordinates = iup_delta(self.coordinates, origCoords, endPts)
589
+
590
+ def optimize(self, origCoords, endPts, tolerance=0.5, isComposite=False):
591
+ from fontTools.varLib.iup import iup_delta_optimize
592
+
593
+ if None in self.coordinates:
594
+ return # already optimized
595
+
596
+ deltaOpt = iup_delta_optimize(
597
+ self.coordinates, origCoords, endPts, tolerance=tolerance
598
+ )
599
+ if None in deltaOpt:
600
+ if isComposite and all(d is None for d in deltaOpt):
601
+ # Fix for macOS composites
602
+ # https://github.com/fonttools/fonttools/issues/1381
603
+ deltaOpt = [(0, 0)] + [None] * (len(deltaOpt) - 1)
604
+ # Use "optimized" version only if smaller...
605
+ varOpt = TupleVariation(self.axes, deltaOpt)
606
+
607
+ # Shouldn't matter that this is different from fvar...?
608
+ axisTags = sorted(self.axes.keys())
609
+ tupleData, auxData = self.compile(axisTags)
610
+ unoptimizedLength = len(tupleData) + len(auxData)
611
+ tupleData, auxData = varOpt.compile(axisTags)
612
+ optimizedLength = len(tupleData) + len(auxData)
613
+
614
+ if optimizedLength < unoptimizedLength:
615
+ self.coordinates = varOpt.coordinates
616
+
617
+ def __imul__(self, scalar):
618
+ self.scaleDeltas(scalar)
619
+ return self
620
+
621
+ def __iadd__(self, other):
622
+ if not isinstance(other, TupleVariation):
623
+ return NotImplemented
624
+ deltas1 = self.coordinates
625
+ length = len(deltas1)
626
+ deltas2 = other.coordinates
627
+ if len(deltas2) != length:
628
+ raise ValueError("cannot sum TupleVariation deltas with different lengths")
629
+ # 'None' values have different meanings in gvar vs cvar TupleVariations:
630
+ # within the gvar, when deltas are not provided explicitly for some points,
631
+ # they need to be inferred; whereas for the 'cvar' table, if deltas are not
632
+ # provided for some CVT values, then no adjustments are made (i.e. None == 0).
633
+ # Thus, we cannot sum deltas for gvar TupleVariations if they contain
634
+ # inferred inferred deltas (the latter need to be computed first using
635
+ # 'calcInferredDeltas' method), but we can treat 'None' values in cvar
636
+ # deltas as if they are zeros.
637
+ if self.getCoordWidth() == 2:
638
+ for i, d2 in zip(range(length), deltas2):
639
+ d1 = deltas1[i]
640
+ try:
641
+ deltas1[i] = (d1[0] + d2[0], d1[1] + d2[1])
642
+ except TypeError:
643
+ raise ValueError("cannot sum gvar deltas with inferred points")
644
+ else:
645
+ for i, d2 in zip(range(length), deltas2):
646
+ d1 = deltas1[i]
647
+ if d1 is not None and d2 is not None:
648
+ deltas1[i] = d1 + d2
649
+ elif d1 is None and d2 is not None:
650
+ deltas1[i] = d2
651
+ # elif d2 is None do nothing
652
+ return self
653
+
654
+
655
+ def decompileSharedTuples(axisTags, sharedTupleCount, data, offset):
656
+ result = []
657
+ for _ in range(sharedTupleCount):
658
+ t, offset = TupleVariation.decompileCoord_(axisTags, data, offset)
659
+ result.append(t)
660
+ return result
661
+
662
+
663
+ def compileSharedTuples(
664
+ axisTags, variations, MAX_NUM_SHARED_COORDS=TUPLE_INDEX_MASK + 1
665
+ ):
666
+ coordCount = Counter()
667
+ for var in variations:
668
+ coord = var.compileCoord(axisTags)
669
+ coordCount[coord] += 1
670
+ # In python < 3.7, most_common() ordering is non-deterministic
671
+ # so apply a sort to make sure the ordering is consistent.
672
+ sharedCoords = sorted(
673
+ coordCount.most_common(MAX_NUM_SHARED_COORDS),
674
+ key=lambda item: (-item[1], item[0]),
675
+ )
676
+ return [c[0] for c in sharedCoords if c[1] > 1]
677
+
678
+
679
+ def compileTupleVariationStore(
680
+ variations, pointCount, axisTags, sharedTupleIndices, useSharedPoints=True
681
+ ):
682
+ # pointCount is actually unused. Keeping for API compat.
683
+ del pointCount
684
+ newVariations = []
685
+ pointDatas = []
686
+ # Compile all points and figure out sharing if desired
687
+ sharedPoints = None
688
+
689
+ # Collect, count, and compile point-sets for all variation sets
690
+ pointSetCount = defaultdict(int)
691
+ for v in variations:
692
+ points = v.getUsedPoints()
693
+ if points is None: # Empty variations
694
+ continue
695
+ pointSetCount[points] += 1
696
+ newVariations.append(v)
697
+ pointDatas.append(points)
698
+ variations = newVariations
699
+ del newVariations
700
+
701
+ if not variations:
702
+ return (0, b"", b"")
703
+
704
+ n = len(variations[0].coordinates)
705
+ assert all(
706
+ len(v.coordinates) == n for v in variations
707
+ ), "Variation sets have different sizes"
708
+
709
+ compiledPoints = {
710
+ pointSet: TupleVariation.compilePoints(pointSet) for pointSet in pointSetCount
711
+ }
712
+
713
+ tupleVariationCount = len(variations)
714
+ tuples = []
715
+ data = []
716
+
717
+ if useSharedPoints:
718
+ # Find point-set which saves most bytes.
719
+ def key(pn):
720
+ pointSet = pn[0]
721
+ count = pn[1]
722
+ return len(compiledPoints[pointSet]) * (count - 1)
723
+
724
+ sharedPoints = max(pointSetCount.items(), key=key)[0]
725
+
726
+ data.append(compiledPoints[sharedPoints])
727
+ tupleVariationCount |= TUPLES_SHARE_POINT_NUMBERS
728
+
729
+ # b'' implies "use shared points"
730
+ pointDatas = [
731
+ compiledPoints[points] if points != sharedPoints else b""
732
+ for points in pointDatas
733
+ ]
734
+
735
+ for v, p in zip(variations, pointDatas):
736
+ thisTuple, thisData = v.compile(axisTags, sharedTupleIndices, pointData=p)
737
+
738
+ tuples.append(thisTuple)
739
+ data.append(thisData)
740
+
741
+ tuples = b"".join(tuples)
742
+ data = b"".join(data)
743
+ return tupleVariationCount, tuples, data
744
+
745
+
746
+ def decompileTupleVariationStore(
747
+ tableTag,
748
+ axisTags,
749
+ tupleVariationCount,
750
+ pointCount,
751
+ sharedTuples,
752
+ data,
753
+ pos,
754
+ dataPos,
755
+ ):
756
+ numAxes = len(axisTags)
757
+ result = []
758
+ if (tupleVariationCount & TUPLES_SHARE_POINT_NUMBERS) != 0:
759
+ sharedPoints, dataPos = TupleVariation.decompilePoints_(
760
+ pointCount, data, dataPos, tableTag
761
+ )
762
+ else:
763
+ sharedPoints = []
764
+ for _ in range(tupleVariationCount & TUPLE_COUNT_MASK):
765
+ dataSize, flags = struct.unpack(">HH", data[pos : pos + 4])
766
+ tupleSize = TupleVariation.getTupleSize_(flags, numAxes)
767
+ tupleData = data[pos : pos + tupleSize]
768
+ pointDeltaData = data[dataPos : dataPos + dataSize]
769
+ result.append(
770
+ decompileTupleVariation_(
771
+ pointCount,
772
+ sharedTuples,
773
+ sharedPoints,
774
+ tableTag,
775
+ axisTags,
776
+ tupleData,
777
+ pointDeltaData,
778
+ )
779
+ )
780
+ pos += tupleSize
781
+ dataPos += dataSize
782
+ return result
783
+
784
+
785
+ def decompileTupleVariation_(
786
+ pointCount, sharedTuples, sharedPoints, tableTag, axisTags, data, tupleData
787
+ ):
788
+ assert tableTag in ("cvar", "gvar"), tableTag
789
+ flags = struct.unpack(">H", data[2:4])[0]
790
+ pos = 4
791
+ if (flags & EMBEDDED_PEAK_TUPLE) == 0:
792
+ peak = sharedTuples[flags & TUPLE_INDEX_MASK]
793
+ else:
794
+ peak, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
795
+ if (flags & INTERMEDIATE_REGION) != 0:
796
+ start, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
797
+ end, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
798
+ else:
799
+ start, end = inferRegion_(peak)
800
+ axes = {}
801
+ for axis in axisTags:
802
+ region = start[axis], peak[axis], end[axis]
803
+ if region != (0.0, 0.0, 0.0):
804
+ axes[axis] = region
805
+ pos = 0
806
+ if (flags & PRIVATE_POINT_NUMBERS) != 0:
807
+ points, pos = TupleVariation.decompilePoints_(
808
+ pointCount, tupleData, pos, tableTag
809
+ )
810
+ else:
811
+ points = sharedPoints
812
+
813
+ deltas = [None] * pointCount
814
+
815
+ if tableTag == "cvar":
816
+ deltas_cvt, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos)
817
+ for p, delta in zip(points, deltas_cvt):
818
+ if 0 <= p < pointCount:
819
+ deltas[p] = delta
820
+
821
+ elif tableTag == "gvar":
822
+ deltas_x, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos)
823
+ deltas_y, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos)
824
+ for p, x, y in zip(points, deltas_x, deltas_y):
825
+ if 0 <= p < pointCount:
826
+ deltas[p] = (x, y)
827
+
828
+ return TupleVariation(axes, deltas)
829
+
830
+
831
+ def inferRegion_(peak):
832
+ """Infer start and end for a (non-intermediate) region
833
+
834
+ This helper function computes the applicability region for
835
+ variation tuples whose INTERMEDIATE_REGION flag is not set in the
836
+ TupleVariationHeader structure. Variation tuples apply only to
837
+ certain regions of the variation space; outside that region, the
838
+ tuple has no effect. To make the binary encoding more compact,
839
+ TupleVariationHeaders can omit the intermediateStartTuple and
840
+ intermediateEndTuple fields.
841
+ """
842
+ start, end = {}, {}
843
+ for axis, value in peak.items():
844
+ start[axis] = min(value, 0.0) # -0.3 --> -0.3; 0.7 --> 0.0
845
+ end[axis] = max(value, 0.0) # -0.3 --> 0.0; 0.7 --> 0.7
846
+ return (start, end)
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/V_A_R_C_.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from .otBase import BaseTTXConverter
2
+
3
+
4
+ class table_V_A_R_C_(BaseTTXConverter):
5
+ pass
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/B_A_S_E_.cpython-310.pyc ADDED
Binary file (393 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/BitmapGlyphMetrics.cpython-310.pyc ADDED
Binary file (1.92 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/D__e_b_g.cpython-310.pyc ADDED
Binary file (1.1 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/F__e_a_t.cpython-310.pyc ADDED
Binary file (4.56 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G_P_K_G_.cpython-310.pyc ADDED
Binary file (3.2 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G_P_O_S_.cpython-310.pyc ADDED
Binary file (393 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G_S_U_B_.cpython-310.pyc ADDED
Binary file (393 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G__l_a_t.cpython-310.pyc ADDED
Binary file (7.19 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/J_S_T_F_.cpython-310.pyc ADDED
Binary file (393 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/L_T_S_H_.cpython-310.pyc ADDED
Binary file (1.78 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/M_V_A_R_.cpython-310.pyc ADDED
Binary file (393 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_J_.cpython-310.pyc ADDED
Binary file (393 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_S_.cpython-310.pyc ADDED
Binary file (393 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__0.cpython-310.pyc ADDED
Binary file (2.13 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__5.cpython-310.pyc ADDED
Binary file (1.89 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_T_F_A_.cpython-310.pyc ADDED
Binary file (383 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/TupleVariation.cpython-310.pyc ADDED
Binary file (20.8 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_O_R_G_.cpython-310.pyc ADDED
Binary file (5.1 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_V_A_R_.cpython-310.pyc ADDED
Binary file (393 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_a_v_a_r.cpython-310.pyc ADDED
Binary file (6.39 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_m_a_p.cpython-310.pyc ADDED
Binary file (35.3 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_v_t.cpython-310.pyc ADDED
Binary file (2.11 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_g_a_s_p.cpython-310.pyc ADDED
Binary file (1.91 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_g_c_i_d.cpython-310.pyc ADDED
Binary file (393 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_l_c_a_r.cpython-310.pyc ADDED
Binary file (393 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_l_t_a_g.cpython-310.pyc ADDED
Binary file (2.49 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_m_e_t_a.cpython-310.pyc ADDED
Binary file (2.96 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_m_o_r_t.cpython-310.pyc ADDED
Binary file (393 Bytes). View file