diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/B_A_S_E_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/B_A_S_E_.py new file mode 100644 index 0000000000000000000000000000000000000000..f468a963a1e2a8d503b57f4d7aeff12b8770cc67 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/B_A_S_E_.py @@ -0,0 +1,5 @@ +from .otBase import BaseTTXConverter + + +class table_B_A_S_E_(BaseTTXConverter): + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/C_F_F__2.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/C_F_F__2.py new file mode 100644 index 0000000000000000000000000000000000000000..edbb0b92f77e3198b55920879271f481082131ea --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/C_F_F__2.py @@ -0,0 +1,13 @@ +from io import BytesIO +from fontTools.ttLib.tables.C_F_F_ import table_C_F_F_ + + +class table_C_F_F__2(table_C_F_F_): + def decompile(self, data, otFont): + self.cff.decompile(BytesIO(data), otFont, isCFF2=True) + assert len(self.cff) == 1, "can't deal with multi-font CFF tables." + + def compile(self, otFont): + f = BytesIO() + self.cff.compile(f, otFont, isCFF2=True) + return f.getvalue() diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/C_O_L_R_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/C_O_L_R_.py new file mode 100644 index 0000000000000000000000000000000000000000..df857842cc300f5506fc5940395ad583a27b59bc --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/C_O_L_R_.py @@ -0,0 +1,157 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. +# +# Google Author(s): Behdad Esfahbod + +from fontTools.misc.textTools import safeEval +from . import DefaultTable + + +class table_C_O_L_R_(DefaultTable.DefaultTable): + """This table is structured so that you can treat it like a dictionary keyed by glyph name. 
+ + ``ttFont['COLR'][]`` will return the color layers for any glyph. + + ``ttFont['COLR'][] = `` will set the color layers for any glyph. + """ + + @staticmethod + def _decompileColorLayersV0(table): + if not table.LayerRecordArray: + return {} + colorLayerLists = {} + layerRecords = table.LayerRecordArray.LayerRecord + numLayerRecords = len(layerRecords) + for baseRec in table.BaseGlyphRecordArray.BaseGlyphRecord: + baseGlyph = baseRec.BaseGlyph + firstLayerIndex = baseRec.FirstLayerIndex + numLayers = baseRec.NumLayers + assert firstLayerIndex + numLayers <= numLayerRecords + layers = [] + for i in range(firstLayerIndex, firstLayerIndex + numLayers): + layerRec = layerRecords[i] + layers.append(LayerRecord(layerRec.LayerGlyph, layerRec.PaletteIndex)) + colorLayerLists[baseGlyph] = layers + return colorLayerLists + + def _toOTTable(self, ttFont): + from . import otTables + from fontTools.colorLib.builder import populateCOLRv0 + + tableClass = getattr(otTables, self.tableTag) + table = tableClass() + table.Version = self.version + + populateCOLRv0( + table, + { + baseGlyph: [(layer.name, layer.colorID) for layer in layers] + for baseGlyph, layers in self.ColorLayers.items() + }, + glyphMap=ttFont.getReverseGlyphMap(rebuild=True), + ) + return table + + def decompile(self, data, ttFont): + from .otBase import OTTableReader + from . import otTables + + # We use otData to decompile, but we adapt the decompiled otTables to the + # existing COLR v0 API for backward compatibility. 
+ reader = OTTableReader(data, tableTag=self.tableTag) + tableClass = getattr(otTables, self.tableTag) + table = tableClass() + table.decompile(reader, ttFont) + + self.version = table.Version + if self.version == 0: + self.ColorLayers = self._decompileColorLayersV0(table) + else: + # for new versions, keep the raw otTables around + self.table = table + + def compile(self, ttFont): + from .otBase import OTTableWriter + + if hasattr(self, "table"): + table = self.table + else: + table = self._toOTTable(ttFont) + + writer = OTTableWriter(tableTag=self.tableTag) + table.compile(writer, ttFont) + return writer.getAllData() + + def toXML(self, writer, ttFont): + if hasattr(self, "table"): + self.table.toXML2(writer, ttFont) + else: + writer.simpletag("version", value=self.version) + writer.newline() + for baseGlyph in sorted(self.ColorLayers.keys(), key=ttFont.getGlyphID): + writer.begintag("ColorGlyph", name=baseGlyph) + writer.newline() + for layer in self.ColorLayers[baseGlyph]: + layer.toXML(writer, ttFont) + writer.endtag("ColorGlyph") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "version": # old COLR v0 API + setattr(self, name, safeEval(attrs["value"])) + elif name == "ColorGlyph": + if not hasattr(self, "ColorLayers"): + self.ColorLayers = {} + glyphName = attrs["name"] + for element in content: + if isinstance(element, str): + continue + layers = [] + for element in content: + if isinstance(element, str): + continue + layer = LayerRecord() + layer.fromXML(element[0], element[1], element[2], ttFont) + layers.append(layer) + self.ColorLayers[glyphName] = layers + else: # new COLR v1 API + from . 
import otTables + + if not hasattr(self, "table"): + tableClass = getattr(otTables, self.tableTag) + self.table = tableClass() + self.table.fromXML(name, attrs, content, ttFont) + self.table.populateDefaults() + self.version = self.table.Version + + def __getitem__(self, glyphName): + if not isinstance(glyphName, str): + raise TypeError(f"expected str, found {type(glyphName).__name__}") + return self.ColorLayers[glyphName] + + def __setitem__(self, glyphName, value): + if not isinstance(glyphName, str): + raise TypeError(f"expected str, found {type(glyphName).__name__}") + if value is not None: + self.ColorLayers[glyphName] = value + elif glyphName in self.ColorLayers: + del self.ColorLayers[glyphName] + + def __delitem__(self, glyphName): + del self.ColorLayers[glyphName] + + +class LayerRecord(object): + def __init__(self, name=None, colorID=None): + self.name = name + self.colorID = colorID + + def toXML(self, writer, ttFont): + writer.simpletag("layer", name=self.name, colorID=self.colorID) + writer.newline() + + def fromXML(self, eltname, attrs, content, ttFont): + for name, value in attrs.items(): + if name == "name": + setattr(self, name, value) + else: + setattr(self, name, safeEval(value)) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/C_P_A_L_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/C_P_A_L_.py new file mode 100644 index 0000000000000000000000000000000000000000..9fb2074afceb4171ff896bbd0b12df2f6526d652 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/C_P_A_L_.py @@ -0,0 +1,296 @@ +# Copyright 2013 Google, Inc. All Rights Reserved. +# +# Google Author(s): Behdad Esfahbod + +from fontTools.misc.textTools import bytesjoin, safeEval +from . 
import DefaultTable +import array +from collections import namedtuple +import struct +import sys + + +class table_C_P_A_L_(DefaultTable.DefaultTable): + NO_NAME_ID = 0xFFFF + DEFAULT_PALETTE_TYPE = 0 + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.palettes = [] + self.paletteTypes = [] + self.paletteLabels = [] + self.paletteEntryLabels = [] + + def decompile(self, data, ttFont): + ( + self.version, + self.numPaletteEntries, + numPalettes, + numColorRecords, + goffsetFirstColorRecord, + ) = struct.unpack(">HHHHL", data[:12]) + assert ( + self.version <= 1 + ), "Version of CPAL table is higher than I know how to handle" + self.palettes = [] + pos = 12 + for i in range(numPalettes): + startIndex = struct.unpack(">H", data[pos : pos + 2])[0] + assert startIndex + self.numPaletteEntries <= numColorRecords + pos += 2 + palette = [] + ppos = goffsetFirstColorRecord + startIndex * 4 + for j in range(self.numPaletteEntries): + palette.append(Color(*struct.unpack(">BBBB", data[ppos : ppos + 4]))) + ppos += 4 + self.palettes.append(palette) + if self.version == 0: + offsetToPaletteTypeArray = 0 + offsetToPaletteLabelArray = 0 + offsetToPaletteEntryLabelArray = 0 + else: + pos = 12 + numPalettes * 2 + ( + offsetToPaletteTypeArray, + offsetToPaletteLabelArray, + offsetToPaletteEntryLabelArray, + ) = struct.unpack(">LLL", data[pos : pos + 12]) + self.paletteTypes = self._decompileUInt32Array( + data, + offsetToPaletteTypeArray, + numPalettes, + default=self.DEFAULT_PALETTE_TYPE, + ) + self.paletteLabels = self._decompileUInt16Array( + data, offsetToPaletteLabelArray, numPalettes, default=self.NO_NAME_ID + ) + self.paletteEntryLabels = self._decompileUInt16Array( + data, + offsetToPaletteEntryLabelArray, + self.numPaletteEntries, + default=self.NO_NAME_ID, + ) + + def _decompileUInt16Array(self, data, offset, numElements, default=0): + if offset == 0: + return [default] * numElements + result = array.array("H", data[offset : offset + 2 * 
numElements]) + if sys.byteorder != "big": + result.byteswap() + assert len(result) == numElements, result + return result.tolist() + + def _decompileUInt32Array(self, data, offset, numElements, default=0): + if offset == 0: + return [default] * numElements + result = array.array("I", data[offset : offset + 4 * numElements]) + if sys.byteorder != "big": + result.byteswap() + assert len(result) == numElements, result + return result.tolist() + + def compile(self, ttFont): + colorRecordIndices, colorRecords = self._compileColorRecords() + paletteTypes = self._compilePaletteTypes() + paletteLabels = self._compilePaletteLabels() + paletteEntryLabels = self._compilePaletteEntryLabels() + numColorRecords = len(colorRecords) // 4 + offsetToFirstColorRecord = 12 + len(colorRecordIndices) + if self.version >= 1: + offsetToFirstColorRecord += 12 + header = struct.pack( + ">HHHHL", + self.version, + self.numPaletteEntries, + len(self.palettes), + numColorRecords, + offsetToFirstColorRecord, + ) + if self.version == 0: + dataList = [header, colorRecordIndices, colorRecords] + else: + pos = offsetToFirstColorRecord + len(colorRecords) + if len(paletteTypes) == 0: + offsetToPaletteTypeArray = 0 + else: + offsetToPaletteTypeArray = pos + pos += len(paletteTypes) + if len(paletteLabels) == 0: + offsetToPaletteLabelArray = 0 + else: + offsetToPaletteLabelArray = pos + pos += len(paletteLabels) + if len(paletteEntryLabels) == 0: + offsetToPaletteEntryLabelArray = 0 + else: + offsetToPaletteEntryLabelArray = pos + pos += len(paletteLabels) + header1 = struct.pack( + ">LLL", + offsetToPaletteTypeArray, + offsetToPaletteLabelArray, + offsetToPaletteEntryLabelArray, + ) + dataList = [ + header, + colorRecordIndices, + header1, + colorRecords, + paletteTypes, + paletteLabels, + paletteEntryLabels, + ] + return bytesjoin(dataList) + + def _compilePalette(self, palette): + assert len(palette) == self.numPaletteEntries + pack = lambda c: struct.pack(">BBBB", c.blue, c.green, c.red, c.alpha) 
+ return bytesjoin([pack(color) for color in palette]) + + def _compileColorRecords(self): + colorRecords, colorRecordIndices, pool = [], [], {} + for palette in self.palettes: + packedPalette = self._compilePalette(palette) + if packedPalette in pool: + index = pool[packedPalette] + else: + index = len(colorRecords) + colorRecords.append(packedPalette) + pool[packedPalette] = index + colorRecordIndices.append(struct.pack(">H", index * self.numPaletteEntries)) + return bytesjoin(colorRecordIndices), bytesjoin(colorRecords) + + def _compilePaletteTypes(self): + if self.version == 0 or not any(self.paletteTypes): + return b"" + assert len(self.paletteTypes) == len(self.palettes) + result = bytesjoin([struct.pack(">I", ptype) for ptype in self.paletteTypes]) + assert len(result) == 4 * len(self.palettes) + return result + + def _compilePaletteLabels(self): + if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteLabels): + return b"" + assert len(self.paletteLabels) == len(self.palettes) + result = bytesjoin([struct.pack(">H", label) for label in self.paletteLabels]) + assert len(result) == 2 * len(self.palettes) + return result + + def _compilePaletteEntryLabels(self): + if self.version == 0 or all( + l == self.NO_NAME_ID for l in self.paletteEntryLabels + ): + return b"" + assert len(self.paletteEntryLabels) == self.numPaletteEntries + result = bytesjoin( + [struct.pack(">H", label) for label in self.paletteEntryLabels] + ) + assert len(result) == 2 * self.numPaletteEntries + return result + + def toXML(self, writer, ttFont): + numPalettes = len(self.palettes) + paletteLabels = {i: nameID for (i, nameID) in enumerate(self.paletteLabels)} + paletteTypes = {i: typ for (i, typ) in enumerate(self.paletteTypes)} + writer.simpletag("version", value=self.version) + writer.newline() + writer.simpletag("numPaletteEntries", value=self.numPaletteEntries) + writer.newline() + for index, palette in enumerate(self.palettes): + attrs = {"index": index} + paletteType 
= paletteTypes.get(index, self.DEFAULT_PALETTE_TYPE) + paletteLabel = paletteLabels.get(index, self.NO_NAME_ID) + if self.version > 0 and paletteLabel != self.NO_NAME_ID: + attrs["label"] = paletteLabel + if self.version > 0 and paletteType != self.DEFAULT_PALETTE_TYPE: + attrs["type"] = paletteType + writer.begintag("palette", **attrs) + writer.newline() + if ( + self.version > 0 + and paletteLabel != self.NO_NAME_ID + and ttFont + and "name" in ttFont + ): + name = ttFont["name"].getDebugName(paletteLabel) + if name is not None: + writer.comment(name) + writer.newline() + assert len(palette) == self.numPaletteEntries + for cindex, color in enumerate(palette): + color.toXML(writer, ttFont, cindex) + writer.endtag("palette") + writer.newline() + if self.version > 0 and not all( + l == self.NO_NAME_ID for l in self.paletteEntryLabels + ): + writer.begintag("paletteEntryLabels") + writer.newline() + for index, label in enumerate(self.paletteEntryLabels): + if label != self.NO_NAME_ID: + writer.simpletag("label", index=index, value=label) + if self.version > 0 and label and ttFont and "name" in ttFont: + name = ttFont["name"].getDebugName(label) + if name is not None: + writer.comment(name) + writer.newline() + writer.endtag("paletteEntryLabels") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "palette": + self.paletteLabels.append(int(attrs.get("label", self.NO_NAME_ID))) + self.paletteTypes.append(int(attrs.get("type", self.DEFAULT_PALETTE_TYPE))) + palette = [] + for element in content: + if isinstance(element, str): + continue + attrs = element[1] + color = Color.fromHex(attrs["value"]) + palette.append(color) + self.palettes.append(palette) + elif name == "paletteEntryLabels": + colorLabels = {} + for element in content: + if isinstance(element, str): + continue + elementName, elementAttr, _ = element + if elementName == "label": + labelIndex = safeEval(elementAttr["index"]) + nameID = safeEval(elementAttr["value"]) + 
colorLabels[labelIndex] = nameID + self.paletteEntryLabels = [ + colorLabels.get(i, self.NO_NAME_ID) + for i in range(self.numPaletteEntries) + ] + elif "value" in attrs: + value = safeEval(attrs["value"]) + setattr(self, name, value) + if name == "numPaletteEntries": + self.paletteEntryLabels = [self.NO_NAME_ID] * self.numPaletteEntries + + +class Color(namedtuple("Color", "blue green red alpha")): + def hex(self): + return "#%02X%02X%02X%02X" % (self.red, self.green, self.blue, self.alpha) + + def __repr__(self): + return self.hex() + + def toXML(self, writer, ttFont, index=None): + writer.simpletag("color", value=self.hex(), index=index) + writer.newline() + + @classmethod + def fromHex(cls, value): + if value[0] == "#": + value = value[1:] + red = int(value[0:2], 16) + green = int(value[2:4], 16) + blue = int(value[4:6], 16) + alpha = int(value[6:8], 16) if len(value) >= 8 else 0xFF + return cls(red=red, green=green, blue=blue, alpha=alpha) + + @classmethod + def fromRGBA(cls, red, green, blue, alpha): + return cls(red=red, green=green, blue=blue, alpha=alpha) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/D__e_b_g.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/D__e_b_g.py new file mode 100644 index 0000000000000000000000000000000000000000..54449a5fd6e39c695af425a5523ecc231aa3c474 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/D__e_b_g.py @@ -0,0 +1,17 @@ +import json + +from . 
import DefaultTable + + +class table_D__e_b_g(DefaultTable.DefaultTable): + def decompile(self, data, ttFont): + self.data = json.loads(data) + + def compile(self, ttFont): + return json.dumps(self.data).encode("utf-8") + + def toXML(self, writer, ttFont): + writer.writecdata(json.dumps(self.data, indent=2)) + + def fromXML(self, name, attrs, content, ttFont): + self.data = json.loads(content) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/E_B_D_T_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/E_B_D_T_.py new file mode 100644 index 0000000000000000000000000000000000000000..9f7f82efd556c90cc3361fa9a59bc4a47d58f3e6 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/E_B_D_T_.py @@ -0,0 +1,827 @@ +from fontTools.misc import sstruct +from fontTools.misc.textTools import ( + bytechr, + byteord, + bytesjoin, + strjoin, + safeEval, + readHex, + hexStr, + deHexStr, +) +from .BitmapGlyphMetrics import ( + BigGlyphMetrics, + bigGlyphMetricsFormat, + SmallGlyphMetrics, + smallGlyphMetricsFormat, +) +from . import DefaultTable +import itertools +import os +import struct +import logging + + +log = logging.getLogger(__name__) + +ebdtTableVersionFormat = """ + > # big endian + version: 16.16F +""" + +ebdtComponentFormat = """ + > # big endian + glyphCode: H + xOffset: b + yOffset: b +""" + + +class table_E_B_D_T_(DefaultTable.DefaultTable): + # Keep a reference to the name of the data locator table. + locatorName = "EBLC" + + # This method can be overridden in subclasses to support new formats + # without changing the other implementation. Also can be used as a + # convenience method for coverting a font file to an alternative format. + def getImageFormatClass(self, imageFormat): + return ebdt_bitmap_classes[imageFormat] + + def decompile(self, data, ttFont): + # Get the version but don't advance the slice. 
+ # Most of the lookup for this table is done relative + # to the begining so slice by the offsets provided + # in the EBLC table. + sstruct.unpack2(ebdtTableVersionFormat, data, self) + + # Keep a dict of glyphs that have been seen so they aren't remade. + # This dict maps intervals of data to the BitmapGlyph. + glyphDict = {} + + # Pull out the EBLC table and loop through glyphs. + # A strike is a concept that spans both tables. + # The actual bitmap data is stored in the EBDT. + locator = ttFont[self.__class__.locatorName] + self.strikeData = [] + for curStrike in locator.strikes: + bitmapGlyphDict = {} + self.strikeData.append(bitmapGlyphDict) + for indexSubTable in curStrike.indexSubTables: + dataIter = zip(indexSubTable.names, indexSubTable.locations) + for curName, curLoc in dataIter: + # Don't create duplicate data entries for the same glyphs. + # Instead just use the structures that already exist if they exist. + if curLoc in glyphDict: + curGlyph = glyphDict[curLoc] + else: + curGlyphData = data[slice(*curLoc)] + imageFormatClass = self.getImageFormatClass( + indexSubTable.imageFormat + ) + curGlyph = imageFormatClass(curGlyphData, ttFont) + glyphDict[curLoc] = curGlyph + bitmapGlyphDict[curName] = curGlyph + + def compile(self, ttFont): + dataList = [] + dataList.append(sstruct.pack(ebdtTableVersionFormat, self)) + dataSize = len(dataList[0]) + + # Keep a dict of glyphs that have been seen so they aren't remade. + # This dict maps the id of the BitmapGlyph to the interval + # in the data. + glyphDict = {} + + # Go through the bitmap glyph data. Just in case the data for a glyph + # changed the size metrics should be recalculated. There are a variety + # of formats and they get stored in the EBLC table. That is why + # recalculation is defered to the EblcIndexSubTable class and just + # pass what is known about bitmap glyphs from this particular table. 
+ locator = ttFont[self.__class__.locatorName] + for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData): + for curIndexSubTable in curStrike.indexSubTables: + dataLocations = [] + for curName in curIndexSubTable.names: + # Handle the data placement based on seeing the glyph or not. + # Just save a reference to the location if the glyph has already + # been saved in compile. This code assumes that glyphs will only + # be referenced multiple times from indexFormat5. By luck the + # code may still work when referencing poorly ordered fonts with + # duplicate references. If there is a font that is unlucky the + # respective compile methods for the indexSubTables will fail + # their assertions. All fonts seem to follow this assumption. + # More complicated packing may be needed if a counter-font exists. + glyph = curGlyphDict[curName] + objectId = id(glyph) + if objectId not in glyphDict: + data = glyph.compile(ttFont) + data = curIndexSubTable.padBitmapData(data) + startByte = dataSize + dataSize += len(data) + endByte = dataSize + dataList.append(data) + dataLoc = (startByte, endByte) + glyphDict[objectId] = dataLoc + else: + dataLoc = glyphDict[objectId] + dataLocations.append(dataLoc) + # Just use the new data locations in the indexSubTable. + # The respective compile implementations will take care + # of any of the problems in the convertion that may arise. + curIndexSubTable.locations = dataLocations + + return bytesjoin(dataList) + + def toXML(self, writer, ttFont): + # When exporting to XML if one of the data export formats + # requires metrics then those metrics may be in the locator. + # In this case populate the bitmaps with "export metrics". 
+ if ttFont.bitmapGlyphDataFormat in ("row", "bitwise"): + locator = ttFont[self.__class__.locatorName] + for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData): + for curIndexSubTable in curStrike.indexSubTables: + for curName in curIndexSubTable.names: + glyph = curGlyphDict[curName] + # I'm not sure which metrics have priority here. + # For now if both metrics exist go with glyph metrics. + if hasattr(glyph, "metrics"): + glyph.exportMetrics = glyph.metrics + else: + glyph.exportMetrics = curIndexSubTable.metrics + glyph.exportBitDepth = curStrike.bitmapSizeTable.bitDepth + + writer.simpletag("header", [("version", self.version)]) + writer.newline() + locator = ttFont[self.__class__.locatorName] + for strikeIndex, bitmapGlyphDict in enumerate(self.strikeData): + writer.begintag("strikedata", [("index", strikeIndex)]) + writer.newline() + for curName, curBitmap in bitmapGlyphDict.items(): + curBitmap.toXML(strikeIndex, curName, writer, ttFont) + writer.endtag("strikedata") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "header": + self.version = safeEval(attrs["version"]) + elif name == "strikedata": + if not hasattr(self, "strikeData"): + self.strikeData = [] + strikeIndex = safeEval(attrs["index"]) + + bitmapGlyphDict = {} + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name[4:].startswith(_bitmapGlyphSubclassPrefix[4:]): + imageFormat = safeEval(name[len(_bitmapGlyphSubclassPrefix) :]) + glyphName = attrs["name"] + imageFormatClass = self.getImageFormatClass(imageFormat) + curGlyph = imageFormatClass(None, None) + curGlyph.fromXML(name, attrs, content, ttFont) + assert glyphName not in bitmapGlyphDict, ( + "Duplicate glyphs with the same name '%s' in the same strike." 
+ % glyphName + ) + bitmapGlyphDict[glyphName] = curGlyph + else: + log.warning("%s being ignored by %s", name, self.__class__.__name__) + + # Grow the strike data array to the appropriate size. The XML + # format allows the strike index value to be out of order. + if strikeIndex >= len(self.strikeData): + self.strikeData += [None] * (strikeIndex + 1 - len(self.strikeData)) + assert ( + self.strikeData[strikeIndex] is None + ), "Duplicate strike EBDT indices." + self.strikeData[strikeIndex] = bitmapGlyphDict + + +class EbdtComponent(object): + def toXML(self, writer, ttFont): + writer.begintag("ebdtComponent", [("name", self.name)]) + writer.newline() + for componentName in sstruct.getformat(ebdtComponentFormat)[1][1:]: + writer.simpletag(componentName, value=getattr(self, componentName)) + writer.newline() + writer.endtag("ebdtComponent") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.name = attrs["name"] + componentNames = set(sstruct.getformat(ebdtComponentFormat)[1][1:]) + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name in componentNames: + vars(self)[name] = safeEval(attrs["value"]) + else: + log.warning("unknown name '%s' being ignored by EbdtComponent.", name) + + +# Helper functions for dealing with binary. 
+ + +def _data2binary(data, numBits): + binaryList = [] + for curByte in data: + value = byteord(curByte) + numBitsCut = min(8, numBits) + for i in range(numBitsCut): + if value & 0x1: + binaryList.append("1") + else: + binaryList.append("0") + value = value >> 1 + numBits -= numBitsCut + return strjoin(binaryList) + + +def _binary2data(binary): + byteList = [] + for bitLoc in range(0, len(binary), 8): + byteString = binary[bitLoc : bitLoc + 8] + curByte = 0 + for curBit in reversed(byteString): + curByte = curByte << 1 + if curBit == "1": + curByte |= 1 + byteList.append(bytechr(curByte)) + return bytesjoin(byteList) + + +def _memoize(f): + class memodict(dict): + def __missing__(self, key): + ret = f(key) + if isinstance(key, int) or len(key) == 1: + self[key] = ret + return ret + + return memodict().__getitem__ + + +# 00100111 -> 11100100 per byte, not to be confused with little/big endian. +# Bitmap data per byte is in the order that binary is written on the page +# with the least significant bit as far right as possible. This is the +# opposite of what makes sense algorithmically and hence this function. +@_memoize +def _reverseBytes(data): + r""" + >>> bin(ord(_reverseBytes(0b00100111))) + '0b11100100' + >>> _reverseBytes(b'\x00\xf0') + b'\x00\x0f' + """ + if isinstance(data, bytes) and len(data) != 1: + return bytesjoin(map(_reverseBytes, data)) + byte = byteord(data) + result = 0 + for i in range(8): + result = result << 1 + result |= byte & 1 + byte = byte >> 1 + return bytechr(result) + + +# This section of code is for reading and writing image data to/from XML. 
+ + +def _writeRawImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): + writer.begintag("rawimagedata") + writer.newline() + writer.dumphex(bitmapObject.imageData) + writer.endtag("rawimagedata") + writer.newline() + + +def _readRawImageData(bitmapObject, name, attrs, content, ttFont): + bitmapObject.imageData = readHex(content) + + +def _writeRowImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): + metrics = bitmapObject.exportMetrics + del bitmapObject.exportMetrics + bitDepth = bitmapObject.exportBitDepth + del bitmapObject.exportBitDepth + + writer.begintag( + "rowimagedata", bitDepth=bitDepth, width=metrics.width, height=metrics.height + ) + writer.newline() + for curRow in range(metrics.height): + rowData = bitmapObject.getRow(curRow, bitDepth=bitDepth, metrics=metrics) + writer.simpletag("row", value=hexStr(rowData)) + writer.newline() + writer.endtag("rowimagedata") + writer.newline() + + +def _readRowImageData(bitmapObject, name, attrs, content, ttFont): + bitDepth = safeEval(attrs["bitDepth"]) + metrics = SmallGlyphMetrics() + metrics.width = safeEval(attrs["width"]) + metrics.height = safeEval(attrs["height"]) + + dataRows = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attr, content = element + # Chop off 'imagedata' from the tag to get just the option. + if name == "row": + dataRows.append(deHexStr(attr["value"])) + bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics) + + +def _writeBitwiseImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): + metrics = bitmapObject.exportMetrics + del bitmapObject.exportMetrics + bitDepth = bitmapObject.exportBitDepth + del bitmapObject.exportBitDepth + + # A dict for mapping binary to more readable/artistic ASCII characters. 
+ binaryConv = {"0": ".", "1": "@"} + + writer.begintag( + "bitwiseimagedata", + bitDepth=bitDepth, + width=metrics.width, + height=metrics.height, + ) + writer.newline() + for curRow in range(metrics.height): + rowData = bitmapObject.getRow( + curRow, bitDepth=1, metrics=metrics, reverseBytes=True + ) + rowData = _data2binary(rowData, metrics.width) + # Make the output a readable ASCII art form. + rowData = strjoin(map(binaryConv.get, rowData)) + writer.simpletag("row", value=rowData) + writer.newline() + writer.endtag("bitwiseimagedata") + writer.newline() + + +def _readBitwiseImageData(bitmapObject, name, attrs, content, ttFont): + bitDepth = safeEval(attrs["bitDepth"]) + metrics = SmallGlyphMetrics() + metrics.width = safeEval(attrs["width"]) + metrics.height = safeEval(attrs["height"]) + + # A dict for mapping from ASCII to binary. All characters are considered + # a '1' except space, period and '0' which maps to '0'. + binaryConv = {" ": "0", ".": "0", "0": "0"} + + dataRows = [] + for element in content: + if not isinstance(element, tuple): + continue + name, attr, content = element + if name == "row": + mapParams = zip(attr["value"], itertools.repeat("1")) + rowData = strjoin(itertools.starmap(binaryConv.get, mapParams)) + dataRows.append(_binary2data(rowData)) + + bitmapObject.setRows( + dataRows, bitDepth=bitDepth, metrics=metrics, reverseBytes=True + ) + + +def _writeExtFileImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont): + try: + folder = os.path.dirname(writer.file.name) + except AttributeError: + # fall back to current directory if output file's directory isn't found + folder = "." 
    # NOTE(review): continuation of _writeExtFileImageData, whose `def` line
    # precedes this chunk. It mirrors the bitmap out to an external file under
    # <folder>/bitmaps/strike<N>/ and records the path in the XML output.
    folder = os.path.join(folder, "bitmaps")
    filename = glyphName + bitmapObject.fileExtension
    if not os.path.isdir(folder):
        os.makedirs(folder)
    folder = os.path.join(folder, "strike%d" % strikeIndex)
    if not os.path.isdir(folder):
        os.makedirs(folder)

    fullPath = os.path.join(folder, filename)
    writer.simpletag("extfileimagedata", value=fullPath)
    writer.newline()

    with open(fullPath, "wb") as file:
        file.write(bitmapObject.imageData)


def _readExtFileImageData(bitmapObject, name, attrs, content, ttFont):
    # Inverse of the "extfile" writer: load the raw image bytes back from the
    # path recorded in the <extfileimagedata> element's "value" attribute.
    fullPath = attrs["value"]
    with open(fullPath, "rb") as file:
        bitmapObject.imageData = file.read()


# End of XML writing code.

# Important information about the naming scheme. Used for identifying formats
# in XML.
_bitmapGlyphSubclassPrefix = "ebdt_bitmap_format_"


class BitmapGlyph(object):
    """Base class for one glyph's bitmap data in an EBDT table.

    Instances decompile lazily: raw bytes are kept in ``self.data`` until an
    attribute access triggers ``decompile()`` (see ``__getattr__``).
    """

    # For the external file format. This can be changed in subclasses. This way
    # when the extfile option is turned on files have the form: glyphName.ext
    # The default is just a flat binary file with no meaning.
    fileExtension = ".bin"

    # Keep track of reading and writing of various forms.
    # Maps the ttFont.bitmapGlyphDataFormat option to (writer, reader) pairs.
    xmlDataFunctions = {
        "raw": (_writeRawImageData, _readRawImageData),
        "row": (_writeRowImageData, _readRowImageData),
        "bitwise": (_writeBitwiseImageData, _readBitwiseImageData),
        "extfile": (_writeExtFileImageData, _readExtFileImageData),
    }

    def __init__(self, data, ttFont):
        self.data = data
        self.ttFont = ttFont
        # TODO Currently non-lazy decompilation is untested here...
        # if not ttFont.lazy:
        # 	self.decompile()
        # 	del self.data

    def __getattr__(self, attr):
        # Allow lazy decompile.
        # Dunder lookups and 'data' itself must fail fast, otherwise pickling /
        # copying and the hasattr(self, "data") checks below would recurse.
        if attr[:2] == "__":
            raise AttributeError(attr)
        if attr == "data":
            raise AttributeError(attr)
        self.decompile()
        del self.data
        return getattr(self, attr)

    def ensureDecompiled(self, recurse=False):
        # Force decompilation now; presence of 'data' marks the undecompiled state.
        if hasattr(self, "data"):
            self.decompile()
            del self.data

    # Not a fan of this but it is needed for safer safety checking.
    def getFormat(self):
        # Derive the numeric EBDT format from the subclass name, e.g.
        # "ebdt_bitmap_format_5" -> 5.
        return safeEval(self.__class__.__name__[len(_bitmapGlyphSubclassPrefix) :])

    def toXML(self, strikeIndex, glyphName, writer, ttFont):
        writer.begintag(self.__class__.__name__, [("name", glyphName)])
        writer.newline()

        self.writeMetrics(writer, ttFont)
        # Use the internal write method to write using the correct output format.
        self.writeData(strikeIndex, glyphName, writer, ttFont)

        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.readMetrics(name, attrs, content, ttFont)
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attr, content = element
            if not name.endswith("imagedata"):
                continue
            # Chop off 'imagedata' from the tag to get just the option.
            option = name[: -len("imagedata")]
            assert option in self.__class__.xmlDataFunctions
            self.readData(name, attr, content, ttFont)

    # Some of the glyphs have the metrics. This allows for metrics to be
    # added if the glyph format has them. Default behavior is to do nothing.
    def writeMetrics(self, writer, ttFont):
        pass

    # The opposite of write metrics.
    def readMetrics(self, name, attrs, content, ttFont):
        pass

    def writeData(self, strikeIndex, glyphName, writer, ttFont):
        # Pick the writer for the font's requested data format; fall back to
        # raw output for unknown options. (readFunc is unused here; reading is
        # dispatched by tag name in readData instead.)
        try:
            writeFunc, readFunc = self.__class__.xmlDataFunctions[
                ttFont.bitmapGlyphDataFormat
            ]
        except KeyError:
            writeFunc = _writeRawImageData
        writeFunc(strikeIndex, glyphName, self, writer, ttFont)

    def readData(self, name, attrs, content, ttFont):
        # Chop off 'imagedata' from the tag to get just the option.
        option = name[: -len("imagedata")]
        writeFunc, readFunc = self.__class__.xmlDataFunctions[option]
        readFunc(self, name, attrs, content, ttFont)


# A closure for creating a mixin for the two types of metrics handling.
# Most of the code is very similar so its easier to deal with here.
# Everything works just by passing the class that the mixin is for.
def _createBitmapPlusMetricsMixin(metricsClass):
    # Both metrics names are listed here to make meaningful error messages.
    metricStrings = [BigGlyphMetrics.__name__, SmallGlyphMetrics.__name__]
    curMetricsName = metricsClass.__name__
    # Find which metrics this is for and determine the opposite name.
    metricsId = metricStrings.index(curMetricsName)
    oppositeMetricsName = metricStrings[1 - metricsId]

    class BitmapPlusMetricsMixin(object):
        def writeMetrics(self, writer, ttFont):
            self.metrics.toXML(writer, ttFont)

        def readMetrics(self, name, attrs, content, ttFont):
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == curMetricsName:
                    self.metrics = metricsClass()
                    self.metrics.fromXML(name, attrs, content, ttFont)
                elif name == oppositeMetricsName:
                    # The wrong metrics flavor for this format is dropped with
                    # a warning rather than an error.
                    log.warning(
                        "Warning: %s being ignored in format %d.",
                        oppositeMetricsName,
                        self.getFormat(),
                    )

    return BitmapPlusMetricsMixin


# Since there are only two types of mixin's just create them here.
BitmapPlusBigMetricsMixin = _createBitmapPlusMetricsMixin(BigGlyphMetrics)
BitmapPlusSmallMetricsMixin = _createBitmapPlusMetricsMixin(SmallGlyphMetrics)


# Data that is bit aligned can be tricky to deal with. These classes implement
# helper functionality for dealing with the data and getting a particular row
# of bitwise data. Also helps implement fancy data export/import in XML.
class BitAlignedBitmapMixin(object):
    """Row access helpers for glyph formats whose image data is bit aligned
    (rows are packed back-to-back with no per-row byte padding)."""

    def _getBitRange(self, row, bitDepth, metrics):
        # Return the half-open (startBit, endBit) span of ``row`` in imageData.
        rowBits = bitDepth * metrics.width
        bitOffset = row * rowBits
        return (bitOffset, bitOffset + rowBits)

    def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
        """Extract one row of the bitmap as bytes, zero-padded at the tail."""
        if metrics is None:
            metrics = self.metrics
        assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"

        # Loop through each byte. This can cover two bytes in the original data or
        # a single byte if things happen to be aligned. The very last entry might
        # not be aligned so take care to trim the binary data to size and pad with
        # zeros in the row data. Bit aligned data is somewhat tricky.
        #
        # Example of data cut. Data cut represented in x's.
        # '|' represents byte boundary.
        # data = ...0XX|XXXXXX00|000... => XXXXXXXX
        # or
        # data = ...0XX|XXXX0000|000... => XXXXXX00
        # or
        # data = ...000|XXXXXXXX|000... => XXXXXXXX
        # or
        # data = ...000|00XXXX00|000... => XXXX0000
        #
        dataList = []
        bitRange = self._getBitRange(row, bitDepth, metrics)
        stepRange = bitRange + (8,)
        for curBit in range(*stepRange):
            endBit = min(curBit + 8, bitRange[1])
            numBits = endBit - curBit
            cutPoint = curBit % 8
            firstByteLoc = curBit // 8
            secondByteLoc = endBit // 8
            if firstByteLoc < secondByteLoc:
                numBitsCut = 8 - cutPoint
            else:
                numBitsCut = endBit - curBit
            curByte = _reverseBytes(self.imageData[firstByteLoc])
            firstHalf = byteord(curByte) >> cutPoint
            firstHalf = ((1 << numBitsCut) - 1) & firstHalf
            newByte = firstHalf
            if firstByteLoc < secondByteLoc and secondByteLoc < len(self.imageData):
                curByte = _reverseBytes(self.imageData[secondByteLoc])
                secondHalf = byteord(curByte) << numBitsCut
                newByte = (firstHalf | secondHalf) & ((1 << numBits) - 1)
            dataList.append(bytechr(newByte))

        # The way the data is kept is opposite the algorithm used.
        data = bytesjoin(dataList)
        if not reverseBytes:
            data = _reverseBytes(data)
        return data

    def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
        """Rebuild imageData from a list of row byte strings (inverse of getRow)."""
        if metrics is None:
            metrics = self.metrics
        if not reverseBytes:
            dataRows = list(map(_reverseBytes, dataRows))

        # Keep track of a list of ordinal values as they are easier to modify
        # than a list of strings. Map to actual strings later.
        numBytes = (self._getBitRange(len(dataRows), bitDepth, metrics)[0] + 7) // 8
        ordDataList = [0] * numBytes
        for row, data in enumerate(dataRows):
            bitRange = self._getBitRange(row, bitDepth, metrics)
            stepRange = bitRange + (8,)
            for curBit, curByte in zip(range(*stepRange), data):
                endBit = min(curBit + 8, bitRange[1])
                cutPoint = curBit % 8
                firstByteLoc = curBit // 8
                secondByteLoc = endBit // 8
                if firstByteLoc < secondByteLoc:
                    numBitsCut = 8 - cutPoint
                else:
                    numBitsCut = endBit - curBit
                curByte = byteord(curByte)
                firstByte = curByte & ((1 << numBitsCut) - 1)
                ordDataList[firstByteLoc] |= firstByte << cutPoint
                if firstByteLoc < secondByteLoc and secondByteLoc < numBytes:
                    secondByte = (curByte >> numBitsCut) & ((1 << 8 - numBitsCut) - 1)
                    ordDataList[secondByteLoc] |= secondByte

        # Save the image data with the bits going the correct way.
        self.imageData = _reverseBytes(bytesjoin(map(bytechr, ordDataList)))


class ByteAlignedBitmapMixin(object):
    """Row access helpers for glyph formats whose image data is byte aligned
    (each row starts on a byte boundary)."""

    def _getByteRange(self, row, bitDepth, metrics):
        # Rows are padded up to a whole number of bytes.
        rowBytes = (bitDepth * metrics.width + 7) // 8
        byteOffset = row * rowBytes
        return (byteOffset, byteOffset + rowBytes)

    def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
        if metrics is None:
            metrics = self.metrics
        assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"
        byteRange = self._getByteRange(row, bitDepth, metrics)
        data = self.imageData[slice(*byteRange)]
        if reverseBytes:
            data = _reverseBytes(data)
        return data

    def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
        if metrics is None:
            metrics = self.metrics
        if reverseBytes:
            dataRows = map(_reverseBytes, dataRows)
        self.imageData = bytesjoin(dataRows)


class ebdt_bitmap_format_1(
    ByteAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph
):
    # Small metrics followed by byte-aligned image data.
    def decompile(self):
        self.metrics = SmallGlyphMetrics()
        dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
        self.imageData = data

    def compile(self, ttFont):
        data = sstruct.pack(smallGlyphMetricsFormat, self.metrics)
        return data + self.imageData


class ebdt_bitmap_format_2(
    BitAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph
):
    # Small metrics followed by bit-aligned image data.
    def decompile(self):
        self.metrics = SmallGlyphMetrics()
        dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
        self.imageData = data

    def compile(self, ttFont):
        data = sstruct.pack(smallGlyphMetricsFormat, self.metrics)
        return data + self.imageData


class ebdt_bitmap_format_5(BitAlignedBitmapMixin, BitmapGlyph):
    # Bit-aligned image data only; metrics live in the EBLC index subtable.
    def decompile(self):
        self.imageData = self.data

    def compile(self, ttFont):
        return self.imageData


class ebdt_bitmap_format_6(
    ByteAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph
):
    # Big metrics followed by byte-aligned image data.
    def decompile(self):
        self.metrics = BigGlyphMetrics()
        dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
        self.imageData = data

    def compile(self, ttFont):
        data = sstruct.pack(bigGlyphMetricsFormat, self.metrics)
        return data + self.imageData


class ebdt_bitmap_format_7(
    BitAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph
):
    # Big metrics followed by bit-aligned image data.
    def decompile(self):
        self.metrics = BigGlyphMetrics()
        dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
        self.imageData = data

    def compile(self, ttFont):
        data = sstruct.pack(bigGlyphMetricsFormat, self.metrics)
        return data + self.imageData


class ComponentBitmapGlyph(BitmapGlyph):
    """Base for composite bitmap glyphs built from EbdtComponent references
    instead of raw image data."""

    def toXML(self, strikeIndex, glyphName, writer, ttFont):
        writer.begintag(self.__class__.__name__, [("name", glyphName)])
        writer.newline()

        self.writeMetrics(writer, ttFont)

        writer.begintag("components")
        writer.newline()
        for curComponent in self.componentArray:
            curComponent.toXML(writer, ttFont)
        writer.endtag("components")
        writer.newline()

        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.readMetrics(name, attrs, content, ttFont)
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attr, content = element
            if name == "components":
                self.componentArray = []
                for compElement in content:
                    if not isinstance(compElement, tuple):
                        continue
                    name, attrs, content = compElement
                    if name == "ebdtComponent":
                        curComponent = EbdtComponent()
                        curComponent.fromXML(name, attrs, content, ttFont)
                        self.componentArray.append(curComponent)
                    else:
                        log.warning("'%s' being ignored in component array.", name)


class ebdt_bitmap_format_8(BitmapPlusSmallMetricsMixin, ComponentBitmapGlyph):
    # Small metrics, a pad byte, then a component array.
    def decompile(self):
        self.metrics = SmallGlyphMetrics()
        dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
        data = data[1:]  # skip the pad byte that follows the small metrics

        (numComponents,) = struct.unpack(">H", data[:2])
        data = data[2:]
        self.componentArray = []
        for i in range(numComponents):
            curComponent = EbdtComponent()
            dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent)
            curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode)
            self.componentArray.append(curComponent)

    def compile(self, ttFont):
        dataList = []
        dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics))
        dataList.append(b"\0")  # pad byte, mirroring decompile
        dataList.append(struct.pack(">H", len(self.componentArray)))
        for curComponent in self.componentArray:
            curComponent.glyphCode = ttFont.getGlyphID(curComponent.name)
            dataList.append(sstruct.pack(ebdtComponentFormat, curComponent))
        return bytesjoin(dataList)


class ebdt_bitmap_format_9(BitmapPlusBigMetricsMixin, ComponentBitmapGlyph):
    # Big metrics followed directly by a component array (no pad byte).
    def decompile(self):
        self.metrics = BigGlyphMetrics()
        dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
        (numComponents,) = struct.unpack(">H", data[:2])
        data = data[2:]
        self.componentArray = []
        for i in range(numComponents):
            curComponent = EbdtComponent()
            dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent)
            curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode)
            self.componentArray.append(curComponent)

    def compile(self, ttFont):
        dataList = []
        dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
        dataList.append(struct.pack(">H", len(self.componentArray)))
        for curComponent in self.componentArray:
            curComponent.glyphCode = ttFont.getGlyphID(curComponent.name)
            dataList.append(sstruct.pack(ebdtComponentFormat, curComponent))
        return bytesjoin(dataList)


# Dictionary of bitmap formats to the class representing that format
# currently only the ones listed in this map are the ones supported.
+ebdt_bitmap_classes = { + 1: ebdt_bitmap_format_1, + 2: ebdt_bitmap_format_2, + 5: ebdt_bitmap_format_5, + 6: ebdt_bitmap_format_6, + 7: ebdt_bitmap_format_7, + 8: ebdt_bitmap_format_8, + 9: ebdt_bitmap_format_9, +} diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/E_B_L_C_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/E_B_L_C_.py new file mode 100644 index 0000000000000000000000000000000000000000..23d57964f6fcb864fe5e3884b436b96058aeaaa6 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/E_B_L_C_.py @@ -0,0 +1,710 @@ +from fontTools.misc import sstruct +from . import DefaultTable +from fontTools.misc.textTools import bytesjoin, safeEval +from .BitmapGlyphMetrics import ( + BigGlyphMetrics, + bigGlyphMetricsFormat, + SmallGlyphMetrics, + smallGlyphMetricsFormat, +) +import struct +import itertools +from collections import deque +import logging + + +log = logging.getLogger(__name__) + +eblcHeaderFormat = """ + > # big endian + version: 16.16F + numSizes: I +""" +# The table format string is split to handle sbitLineMetrics simply. +bitmapSizeTableFormatPart1 = """ + > # big endian + indexSubTableArrayOffset: I + indexTablesSize: I + numberOfIndexSubTables: I + colorRef: I +""" +# The compound type for hori and vert. +sbitLineMetricsFormat = """ + > # big endian + ascender: b + descender: b + widthMax: B + caretSlopeNumerator: b + caretSlopeDenominator: b + caretOffset: b + minOriginSB: b + minAdvanceSB: b + maxBeforeBL: b + minAfterBL: b + pad1: b + pad2: b +""" +# hori and vert go between the two parts. 
bitmapSizeTableFormatPart2 = """
    > # big endian
    startGlyphIndex: H
    endGlyphIndex: H
    ppemX: B
    ppemY: B
    bitDepth: B
    flags: b
"""

indexSubTableArrayFormat = ">HHL"
indexSubTableArraySize = struct.calcsize(indexSubTableArrayFormat)

indexSubHeaderFormat = ">HHL"
indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat)

codeOffsetPairFormat = ">HH"
codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat)


class table_E_B_L_C_(DefaultTable.DefaultTable):
    """Embedded Bitmap Location table: per-strike size records plus index
    subtables that map glyph ids to image data offsets in EBDT."""

    dependencies = ["EBDT"]

    # This method can be overridden in subclasses to support new formats
    # without changing the other implementation. Also can be used as a
    # convenience method for coverting a font file to an alternative format.
    def getIndexFormatClass(self, indexFormat):
        return eblc_sub_table_classes[indexFormat]

    def decompile(self, data, ttFont):
        # Save the original data because offsets are from the start of the table.
        origData = data
        i = 0

        dummy = sstruct.unpack(eblcHeaderFormat, data[:8], self)
        i += 8

        self.strikes = []
        for curStrikeIndex in range(self.numSizes):
            curStrike = Strike()
            self.strikes.append(curStrike)
            curTable = curStrike.bitmapSizeTable
            dummy = sstruct.unpack2(
                bitmapSizeTableFormatPart1, data[i : i + 16], curTable
            )
            i += 16
            for metric in ("hori", "vert"):
                metricObj = SbitLineMetrics()
                vars(curTable)[metric] = metricObj
                dummy = sstruct.unpack2(
                    sbitLineMetricsFormat, data[i : i + 12], metricObj
                )
                i += 12
            dummy = sstruct.unpack(
                bitmapSizeTableFormatPart2, data[i : i + 8], curTable
            )
            i += 8

        # Second pass: resolve each strike's index subtable array and the
        # subtables it points at.
        for curStrike in self.strikes:
            curTable = curStrike.bitmapSizeTable
            for subtableIndex in range(curTable.numberOfIndexSubTables):
                i = (
                    curTable.indexSubTableArrayOffset
                    + subtableIndex * indexSubTableArraySize
                )

                tup = struct.unpack(
                    indexSubTableArrayFormat, data[i : i + indexSubTableArraySize]
                )
                (firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup
                i = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable

                tup = struct.unpack(
                    indexSubHeaderFormat, data[i : i + indexSubHeaderSize]
                )
                (indexFormat, imageFormat, imageDataOffset) = tup

                indexFormatClass = self.getIndexFormatClass(indexFormat)
                indexSubTable = indexFormatClass(data[i + indexSubHeaderSize :], ttFont)
                indexSubTable.firstGlyphIndex = firstGlyphIndex
                indexSubTable.lastGlyphIndex = lastGlyphIndex
                indexSubTable.additionalOffsetToIndexSubtable = (
                    additionalOffsetToIndexSubtable
                )
                indexSubTable.indexFormat = indexFormat
                indexSubTable.imageFormat = imageFormat
                indexSubTable.imageDataOffset = imageDataOffset
                indexSubTable.decompile()  # https://github.com/fonttools/fonttools/issues/317
                curStrike.indexSubTables.append(indexSubTable)

    def compile(self, ttFont):
        dataList = []
        self.numSizes = len(self.strikes)
        dataList.append(sstruct.pack(eblcHeaderFormat, self))

        # Data size of the header + bitmapSizeTable needs to be calculated
        # in order to form offsets. This value will hold the size of the data
        # in dataList after all the data is consolidated in dataList.
        dataSize = len(dataList[0])

        # The table will be structured in the following order:
        # (0) header
        # (1) Each bitmapSizeTable [1 ... self.numSizes]
        # (2) Alternate between indexSubTableArray and indexSubTable
        #     for each bitmapSizeTable present.
        #
        # The issue is maintaining the proper offsets when table information
        # gets moved around. All offsets and size information must be recalculated
        # when building the table to allow editing within ttLib and also allow easy
        # import/export to and from XML. All of this offset information is lost
        # when exporting to XML so everything must be calculated fresh so importing
        # from XML will work cleanly. Only byte offset and size information is
        # calculated fresh. Count information like numberOfIndexSubTables is
        # checked through assertions. If the information in this table was not
        # touched or was changed properly then these types of values should match.
        #
        # The table will be rebuilt the following way:
        # (0) Precompute the size of all the bitmapSizeTables. This is needed to
        #     compute the offsets properly.
        # (1) For each bitmapSizeTable compute the indexSubTable and
        #     indexSubTableArray pair. The indexSubTable must be computed first
        #     so that the offset information in indexSubTableArray can be
        #     calculated. Update the data size after each pairing.
        # (2) Build each bitmapSizeTable.
        # (3) Consolidate all the data into the main dataList in the correct order.

        for _ in self.strikes:
            dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1)
            dataSize += len(("hori", "vert")) * sstruct.calcsize(sbitLineMetricsFormat)
            dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2)

        indexSubTablePairDataList = []
        for curStrike in self.strikes:
            curTable = curStrike.bitmapSizeTable
            curTable.numberOfIndexSubTables = len(curStrike.indexSubTables)
            curTable.indexSubTableArrayOffset = dataSize

            # Precompute the size of the indexSubTableArray. This information
            # is important for correctly calculating the new value for
            # additionalOffsetToIndexSubtable.
            sizeOfSubTableArray = (
                curTable.numberOfIndexSubTables * indexSubTableArraySize
            )
            lowerBound = dataSize
            dataSize += sizeOfSubTableArray
            upperBound = dataSize

            indexSubTableDataList = []
            for indexSubTable in curStrike.indexSubTables:
                indexSubTable.additionalOffsetToIndexSubtable = (
                    dataSize - curTable.indexSubTableArrayOffset
                )
                # Glyph index bounds are recomputed from the names, not trusted
                # from whatever round-tripped through XML.
                glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names))
                indexSubTable.firstGlyphIndex = min(glyphIds)
                indexSubTable.lastGlyphIndex = max(glyphIds)
                data = indexSubTable.compile(ttFont)
                indexSubTableDataList.append(data)
                dataSize += len(data)
            curTable.startGlyphIndex = min(
                ist.firstGlyphIndex for ist in curStrike.indexSubTables
            )
            curTable.endGlyphIndex = max(
                ist.lastGlyphIndex for ist in curStrike.indexSubTables
            )

            for i in curStrike.indexSubTables:
                # NOTE(review): packs the indexSubTableArray record; uses
                # indexSubHeaderFormat, which is the identical ">HHL" layout
                # as indexSubTableArrayFormat.
                data = struct.pack(
                    indexSubHeaderFormat,
                    i.firstGlyphIndex,
                    i.lastGlyphIndex,
                    i.additionalOffsetToIndexSubtable,
                )
                indexSubTablePairDataList.append(data)
            indexSubTablePairDataList.extend(indexSubTableDataList)
            curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset

        for curStrike in self.strikes:
            curTable = curStrike.bitmapSizeTable
            data = sstruct.pack(bitmapSizeTableFormatPart1, curTable)
            dataList.append(data)
            for metric in ("hori", "vert"):
                metricObj = vars(curTable)[metric]
                data = sstruct.pack(sbitLineMetricsFormat, metricObj)
                dataList.append(data)
            data = sstruct.pack(bitmapSizeTableFormatPart2, curTable)
            dataList.append(data)
        dataList.extend(indexSubTablePairDataList)

        return bytesjoin(dataList)

    def toXML(self, writer, ttFont):
        writer.simpletag("header", [("version", self.version)])
        writer.newline()
        for curIndex, curStrike in enumerate(self.strikes):
            curStrike.toXML(curIndex, writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        if name == "header":
            self.version = safeEval(attrs["version"])
        elif name == "strike":
            if not hasattr(self, "strikes"):
                self.strikes = []
            strikeIndex = safeEval(attrs["index"])
            curStrike = Strike()
            curStrike.fromXML(name, attrs, content, ttFont, self)

            # Grow the strike array to the appropriate size. The XML format
            # allows for the strike index value to be out of order.
            if strikeIndex >= len(self.strikes):
                self.strikes += [None] * (strikeIndex + 1 - len(self.strikes))
            assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices."
            self.strikes[strikeIndex] = curStrike


class Strike(object):
    """One EBLC strike: a BitmapSizeTable plus its index subtables."""

    def __init__(self):
        self.bitmapSizeTable = BitmapSizeTable()
        self.indexSubTables = []

    def toXML(self, strikeIndex, writer, ttFont):
        writer.begintag("strike", [("index", strikeIndex)])
        writer.newline()
        self.bitmapSizeTable.toXML(writer, ttFont)
        writer.comment(
            "GlyphIds are written but not read. The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler."
        )
        writer.newline()
        for indexSubTable in self.indexSubTables:
            indexSubTable.toXML(writer, ttFont)
        writer.endtag("strike")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont, locator):
        # ``locator`` is the parent table; it supplies getIndexFormatClass so
        # subclasses of table_E_B_L_C_ can plug in their own subtable classes.
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == "bitmapSizeTable":
                self.bitmapSizeTable.fromXML(name, attrs, content, ttFont)
            elif name.startswith(_indexSubTableSubclassPrefix):
                indexFormat = safeEval(name[len(_indexSubTableSubclassPrefix) :])
                indexFormatClass = locator.getIndexFormatClass(indexFormat)
                indexSubTable = indexFormatClass(None, None)
                indexSubTable.indexFormat = indexFormat
                indexSubTable.fromXML(name, attrs, content, ttFont)
                self.indexSubTables.append(indexSubTable)


class BitmapSizeTable(object):
    """Fixed-size strike header (ppem, bit depth, line metrics, glyph range)."""

    # Returns all the simple metric names that bitmap size table
    # cares about in terms of XML creation.
    def _getXMLMetricNames(self):
        dataNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1]
        dataNames = {**dataNames, **sstruct.getformat(bitmapSizeTableFormatPart2)[1]}
        # Skip the first 3 data names because they are byte offsets and counts.
        return list(dataNames.keys())[3:]

    def toXML(self, writer, ttFont):
        writer.begintag("bitmapSizeTable")
        writer.newline()
        for metric in ("hori", "vert"):
            getattr(self, metric).toXML(metric, writer, ttFont)
        for metricName in self._getXMLMetricNames():
            writer.simpletag(metricName, value=getattr(self, metricName))
            writer.newline()
        writer.endtag("bitmapSizeTable")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Create a lookup for all the simple names that make sense to
        # bitmap size table. Only read the information from these names.
        dataNames = set(self._getXMLMetricNames())
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == "sbitLineMetrics":
                direction = attrs["direction"]
                assert direction in (
                    "hori",
                    "vert",
                ), "SbitLineMetrics direction specified invalid."
                metricObj = SbitLineMetrics()
                metricObj.fromXML(name, attrs, content, ttFont)
                vars(self)[direction] = metricObj
            elif name in dataNames:
                vars(self)[name] = safeEval(attrs["value"])
            else:
                log.warning("unknown name '%s' being ignored in BitmapSizeTable.", name)


class SbitLineMetrics(object):
    """Horizontal or vertical line metrics for one strike (sbitLineMetrics)."""

    def toXML(self, name, writer, ttFont):
        writer.begintag("sbitLineMetrics", [("direction", name)])
        writer.newline()
        for metricName in sstruct.getformat(sbitLineMetricsFormat)[1]:
            writer.simpletag(metricName, value=getattr(self, metricName))
            writer.newline()
        writer.endtag("sbitLineMetrics")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        metricNames = set(sstruct.getformat(sbitLineMetricsFormat)[1])
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name in metricNames:
                vars(self)[name] = safeEval(attrs["value"])


# Important information about the naming scheme. Used for identifying subtables.
_indexSubTableSubclassPrefix = "eblc_index_sub_table_"


class EblcIndexSubTable(object):
    """Base class for the EBLC index subtable formats; decompiles lazily
    like BitmapGlyph (raw bytes held in ``self.data`` until first access)."""

    def __init__(self, data, ttFont):
        self.data = data
        self.ttFont = ttFont
        # TODO Currently non-lazy decompiling doesn't work for this class...
        # if not ttFont.lazy:
        # 	self.decompile()
        # 	del self.data, self.ttFont

    def __getattr__(self, attr):
        # Allow lazy decompile.
        if attr[:2] == "__":
            raise AttributeError(attr)
        if attr == "data":
            raise AttributeError(attr)
        self.decompile()
        return getattr(self, attr)

    def ensureDecompiled(self, recurse=False):
        if hasattr(self, "data"):
            self.decompile()

    # This method just takes care of the indexSubHeader. Implementing subclasses
    # should call it to compile the indexSubHeader and then continue compiling
    # the remainder of their unique format.
    def compile(self, ttFont):
        return struct.pack(
            indexSubHeaderFormat,
            self.indexFormat,
            self.imageFormat,
            self.imageDataOffset,
        )

    # Creates the XML for bitmap glyphs. Each index sub table basically makes
    # the same XML except for specific metric information that is written
    # out via a method call that a subclass implements optionally.
    def toXML(self, writer, ttFont):
        writer.begintag(
            self.__class__.__name__,
            [
                ("imageFormat", self.imageFormat),
                ("firstGlyphIndex", self.firstGlyphIndex),
                ("lastGlyphIndex", self.lastGlyphIndex),
            ],
        )
        writer.newline()
        self.writeMetrics(writer, ttFont)
        # Write out the names as thats all thats needed to rebuild etc.
        # For font debugging of consecutive formats the ids are also written.
        # The ids are not read when moving from the XML format.
        glyphIds = map(ttFont.getGlyphID, self.names)
        for glyphName, glyphId in zip(self.names, glyphIds):
            writer.simpletag("glyphLoc", name=glyphName, id=glyphId)
            writer.newline()
        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Read all the attributes. Even though the glyph indices are
        # recalculated, they are still read in case there needs to
        # be an immediate export of the data.
        self.imageFormat = safeEval(attrs["imageFormat"])
        self.firstGlyphIndex = safeEval(attrs["firstGlyphIndex"])
        self.lastGlyphIndex = safeEval(attrs["lastGlyphIndex"])

        self.readMetrics(name, attrs, content, ttFont)

        self.names = []
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == "glyphLoc":
                self.names.append(attrs["name"])

    # A helper method that writes the metrics for the index sub table. It also
    # is responsible for writing the image size for fixed size data since fixed
    # size is not recalculated on compile. Default behavior is to do nothing.
    def writeMetrics(self, writer, ttFont):
        pass

    # A helper method that is the inverse of writeMetrics.
    def readMetrics(self, name, attrs, content, ttFont):
        pass

    # This method is for fixed glyph data sizes. There are formats where
    # the glyph data is fixed but are actually composite glyphs. To handle
    # this the font spec in indexSubTable makes the data the size of the
    # fixed size by padding the component arrays. This function abstracts
    # out this padding process. Input is data unpadded. Output is data
    # padded only in fixed formats. Default behavior is to return the data.
    def padBitmapData(self, data):
        return data

    # Remove any of the glyph locations and names that are flagged as skipped.
    # This only occurs in formats {1,3}.
    def removeSkipGlyphs(self):
        # Determines if a name, location pair is a valid data location.
        # Skip glyphs are marked when the size is equal to zero.
        def isValidLocation(args):
            (name, (startByte, endByte)) = args
            return startByte < endByte

        # Remove all skip glyphs.
        dataPairs = list(filter(isValidLocation, zip(self.names, self.locations)))
        self.names, self.locations = list(map(list, zip(*dataPairs)))


# A closure for creating a custom mixin. This is done because formats 1 and 3
# are very similar. The only difference between them is the size per offset
# value. Code put in here should handle both cases generally.
def _createOffsetArrayIndexSubTableMixin(formatStringForDataType):
    # Prep the data size for the offset array data format.
    dataFormat = ">" + formatStringForDataType
    offsetDataSize = struct.calcsize(dataFormat)

    class OffsetArrayIndexSubTableMixin(object):
        def decompile(self):
            numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1
            indexingOffsets = [
                glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs + 2)
            ]
            indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
            offsetArray = [
                struct.unpack(dataFormat, self.data[slice(*loc)])[0]
                for loc in indexingLocations
            ]

            glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
            modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray]
            self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:]))

            self.names = list(map(self.ttFont.getGlyphName, glyphIds))
            self.removeSkipGlyphs()
            del self.data, self.ttFont

        def compile(self, ttFont):
            # First make sure that all the data lines up properly. Formats 1 and 3
            # must have all its data lined up consecutively. If not this will fail.
            for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
                assert (
                    curLoc[1] == nxtLoc[0]
                ), "Data must be consecutive in indexSubTable offset formats"

            glyphIds = list(map(ttFont.getGlyphID, self.names))
            # Make sure that all ids are sorted strictly increasing.
            assert all(glyphIds[i] < glyphIds[i + 1] for i in range(len(glyphIds) - 1))

            # Run a simple algorithm to add skip glyphs to the data locations at
            # the places where an id is not present.
            idQueue = deque(glyphIds)
            locQueue = deque(self.locations)
            allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
            allLocations = []
            for curId in allGlyphIds:
                if curId != idQueue[0]:
                    # Zero-length location marks a skip glyph.
                    allLocations.append((locQueue[0][0], locQueue[0][0]))
                else:
                    idQueue.popleft()
                    allLocations.append(locQueue.popleft())

            # Now that all the locations are collected, pack them appropriately into
            # offsets. This is the form where offset[i] is the location and
            # offset[i+1]-offset[i] is the size of the data location.
            offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]]
            # Image data offset must be less than or equal to the minimum of locations.
            # This offset may change the value for round tripping but is safer and
            # allows imageDataOffset to not be required to be in the XML version.
            self.imageDataOffset = min(offsets)
            offsetArray = [offset - self.imageDataOffset for offset in offsets]

            dataList = [EblcIndexSubTable.compile(self, ttFont)]
            dataList += [
                struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray
            ]
            # Take care of any padding issues. Only occurs in format 3.
            if offsetDataSize * len(offsetArray) % 4 != 0:
                dataList.append(struct.pack(dataFormat, 0))
            return bytesjoin(dataList)

    return OffsetArrayIndexSubTableMixin


# A Mixin for functionality shared between the different kinds
# of fixed sized data handling. Both kinds have big metrics so
# that kind of special processing is also handled in this mixin.
class FixedSizeIndexSubTableMixin(object):
    def writeMetrics(self, writer, ttFont):
        writer.simpletag("imageSize", value=self.imageSize)
        writer.newline()
        self.metrics.toXML(writer, ttFont)

    def readMetrics(self, name, attrs, content, ttFont):
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == "imageSize":
                self.imageSize = safeEval(attrs["value"])
            elif name == BigGlyphMetrics.__name__:
                self.metrics = BigGlyphMetrics()
                self.metrics.fromXML(name, attrs, content, ttFont)
            elif name == SmallGlyphMetrics.__name__:
                log.warning(
                    "SmallGlyphMetrics being ignored in format %d.", self.indexFormat
                )

    def padBitmapData(self, data):
        # Make sure that the data isn't bigger than the fixed size.
        assert len(data) <= self.imageSize, (
            "Data in indexSubTable format %d must be less than the fixed size."
            % self.indexFormat
        )
        # Pad the data so that it matches the fixed size.
        pad = (self.imageSize - len(data)) * b"\0"
        return data + pad


class eblc_index_sub_table_1(
    _createOffsetArrayIndexSubTableMixin("L"), EblcIndexSubTable
):
    # Format 1: 32-bit offset array, one entry per glyph in the range.
    pass


class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
    # Format 2: every glyph has the same imageSize and shared big metrics.
    def decompile(self):
        (self.imageSize,) = struct.unpack(">L", self.data[:4])
        self.metrics = BigGlyphMetrics()
        sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics)
        glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
        offsets = [
            self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds) + 1)
        ]
        self.locations = list(zip(offsets, offsets[1:]))
        self.names = list(map(self.ttFont.getGlyphName, glyphIds))
        del self.data, self.ttFont

    def compile(self, ttFont):
        glyphIds = list(map(ttFont.getGlyphID, self.names))
        # Make sure all the ids are consecutive. This is required by Format 2.
        assert glyphIds == list(
            range(self.firstGlyphIndex, self.lastGlyphIndex + 1)
        ), "Format 2 ids must be consecutive."
        self.imageDataOffset = min(next(iter(zip(*self.locations))))

        dataList = [EblcIndexSubTable.compile(self, ttFont)]
        dataList.append(struct.pack(">L", self.imageSize))
        dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
        return bytesjoin(dataList)


class eblc_index_sub_table_3(
    _createOffsetArrayIndexSubTableMixin("H"), EblcIndexSubTable
):
    # Format 3: like format 1 but with 16-bit offsets (padded to 4 bytes).
    pass


class eblc_index_sub_table_4(EblcIndexSubTable):
    # Format 4: sparse list of (glyphId, offset) pairs.
    def decompile(self):
        (numGlyphs,) = struct.unpack(">L", self.data[:4])
        data = self.data[4:]
        indexingOffsets = [
            glyphIndex * codeOffsetPairSize for glyphIndex in range(numGlyphs + 2)
        ]
        indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
        glyphArray = [
            struct.unpack(codeOffsetPairFormat, data[slice(*loc)])
            for loc in indexingLocations
        ]
        glyphIds, offsets = list(map(list, zip(*glyphArray)))
        # There are one too many glyph ids. Get rid of the last one.
        glyphIds.pop()

        offsets = [offset + self.imageDataOffset for offset in offsets]
        self.locations = list(zip(offsets, offsets[1:]))
        self.names = list(map(self.ttFont.getGlyphName, glyphIds))
        del self.data, self.ttFont

    def compile(self, ttFont):
        # First make sure that all the data lines up properly. Format 4
        # must have all its data lined up consecutively. If not this will fail.
        for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
            assert (
                curLoc[1] == nxtLoc[0]
            ), "Data must be consecutive in indexSubTable format 4"

        offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]]
        # Image data offset must be less than or equal to the minimum of locations.
        # Resetting this offset may change the value for round tripping but is safer
        # and allows imageDataOffset to not be required to be in the XML version.
        self.imageDataOffset = min(offsets)
        offsets = [offset - self.imageDataOffset for offset in offsets]
        glyphIds = list(map(ttFont.getGlyphID, self.names))
        # Create an iterator over the ids plus a padding value.
        idsPlusPad = list(itertools.chain(glyphIds, [0]))

        dataList = [EblcIndexSubTable.compile(self, ttFont)]
        dataList.append(struct.pack(">L", len(glyphIds)))
        tmp = [
            struct.pack(codeOffsetPairFormat, *cop) for cop in zip(idsPlusPad, offsets)
        ]
        dataList += tmp
        data = bytesjoin(dataList)
        return data


class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
    # Format 5: fixed imageSize, shared big metrics, sparse glyph id list.
    def decompile(self):
        self.origDataLen = 0
        (self.imageSize,) = struct.unpack(">L", self.data[:4])
        data = self.data[4:]
        self.metrics, data = sstruct.unpack2(
            bigGlyphMetricsFormat, data, BigGlyphMetrics()
        )
        (numGlyphs,) = struct.unpack(">L", data[:4])
        data = data[4:]
        glyphIds = [
            struct.unpack(">H", data[2 * i : 2 * (i + 1)])[0] for i in range(numGlyphs)
        ]

        offsets = [
            self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds) + 1)
        ]
        self.locations = list(zip(offsets, offsets[1:]))
        self.names = list(map(self.ttFont.getGlyphName, glyphIds))
        del self.data, self.ttFont

    def compile(self, ttFont):
        self.imageDataOffset = min(next(iter(zip(*self.locations))))
        dataList = [EblcIndexSubTable.compile(self, ttFont)]
        dataList.append(struct.pack(">L", self.imageSize))
        dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
        glyphIds = list(map(ttFont.getGlyphID, self.names))
        dataList.append(struct.pack(">L", len(glyphIds)))
        dataList += [struct.pack(">H", curId) for curId in glyphIds]
        # Pad the glyph id array to a 4-byte boundary.
        if len(glyphIds) % 2 == 1:
            dataList.append(struct.pack(">H", 0))
        return bytesjoin(dataList)


# Dictionary of indexFormat to the class representing that format.
+eblc_sub_table_classes = { + 1: eblc_index_sub_table_1, + 2: eblc_index_sub_table_2, + 3: eblc_index_sub_table_3, + 4: eblc_index_sub_table_4, + 5: eblc_index_sub_table_5, +} diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/F_F_T_M_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/F_F_T_M_.py new file mode 100644 index 0000000000000000000000000000000000000000..823ced1bafe991b73d73632773b3d7d21990b572 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/F_F_T_M_.py @@ -0,0 +1,42 @@ +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from fontTools.misc.timeTools import timestampFromString, timestampToString +from . import DefaultTable + +FFTMFormat = """ + > # big endian + version: I + FFTimeStamp: Q + sourceCreated: Q + sourceModified: Q +""" + + +class table_F_F_T_M_(DefaultTable.DefaultTable): + def decompile(self, data, ttFont): + dummy, rest = sstruct.unpack2(FFTMFormat, data, self) + + def compile(self, ttFont): + data = sstruct.pack(FFTMFormat, self) + return data + + def toXML(self, writer, ttFont): + writer.comment( + "FontForge's timestamp, font source creation and modification dates" + ) + writer.newline() + formatstring, names, fixes = sstruct.getformat(FFTMFormat) + for name in names: + value = getattr(self, name) + if name in ("FFTimeStamp", "sourceCreated", "sourceModified"): + value = timestampToString(value) + writer.simpletag(name, value=value) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + value = attrs["value"] + if name in ("FFTimeStamp", "sourceCreated", "sourceModified"): + value = timestampFromString(value) + else: + value = safeEval(value) + setattr(self, name, value) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G_D_E_F_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G_D_E_F_.py new file mode 100644 index 
0000000000000000000000000000000000000000..d8ae8b23bb6af53aeb08271c3d489f52a28a5e02 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G_D_E_F_.py @@ -0,0 +1,5 @@ +from .otBase import BaseTTXConverter + + +class table_G_D_E_F_(BaseTTXConverter): + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G_P_K_G_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G_P_K_G_.py new file mode 100644 index 0000000000000000000000000000000000000000..eed34d92105926dcdb988ef345e8421a93b85518 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G_P_K_G_.py @@ -0,0 +1,126 @@ +from fontTools.misc import sstruct +from fontTools.misc.textTools import bytesjoin, safeEval, readHex +from . import DefaultTable +import sys +import array + +GPKGFormat = """ + > # big endian + version: H + flags: H + numGMAPs: H + numGlyplets: H +""" +# psFontName is a byte string which follows the record above. This is zero padded +# to the beginning of the records array. The recordsOffsst is 32 bit aligned. 
+ + +class table_G_P_K_G_(DefaultTable.DefaultTable): + def decompile(self, data, ttFont): + dummy, newData = sstruct.unpack2(GPKGFormat, data, self) + + GMAPoffsets = array.array("I") + endPos = (self.numGMAPs + 1) * 4 + GMAPoffsets.frombytes(newData[:endPos]) + if sys.byteorder != "big": + GMAPoffsets.byteswap() + self.GMAPs = [] + for i in range(self.numGMAPs): + start = GMAPoffsets[i] + end = GMAPoffsets[i + 1] + self.GMAPs.append(data[start:end]) + pos = endPos + endPos = pos + (self.numGlyplets + 1) * 4 + glyphletOffsets = array.array("I") + glyphletOffsets.frombytes(newData[pos:endPos]) + if sys.byteorder != "big": + glyphletOffsets.byteswap() + self.glyphlets = [] + for i in range(self.numGlyplets): + start = glyphletOffsets[i] + end = glyphletOffsets[i + 1] + self.glyphlets.append(data[start:end]) + + def compile(self, ttFont): + self.numGMAPs = len(self.GMAPs) + self.numGlyplets = len(self.glyphlets) + GMAPoffsets = [0] * (self.numGMAPs + 1) + glyphletOffsets = [0] * (self.numGlyplets + 1) + + dataList = [sstruct.pack(GPKGFormat, self)] + + pos = len(dataList[0]) + (self.numGMAPs + 1) * 4 + (self.numGlyplets + 1) * 4 + GMAPoffsets[0] = pos + for i in range(1, self.numGMAPs + 1): + pos += len(self.GMAPs[i - 1]) + GMAPoffsets[i] = pos + gmapArray = array.array("I", GMAPoffsets) + if sys.byteorder != "big": + gmapArray.byteswap() + dataList.append(gmapArray.tobytes()) + + glyphletOffsets[0] = pos + for i in range(1, self.numGlyplets + 1): + pos += len(self.glyphlets[i - 1]) + glyphletOffsets[i] = pos + glyphletArray = array.array("I", glyphletOffsets) + if sys.byteorder != "big": + glyphletArray.byteswap() + dataList.append(glyphletArray.tobytes()) + dataList += self.GMAPs + dataList += self.glyphlets + data = bytesjoin(dataList) + return data + + def toXML(self, writer, ttFont): + writer.comment("Most of this table will be recalculated by the compiler") + writer.newline() + formatstring, names, fixes = sstruct.getformat(GPKGFormat) + for name in names: + 
value = getattr(self, name) + writer.simpletag(name, value=value) + writer.newline() + + writer.begintag("GMAPs") + writer.newline() + for gmapData in self.GMAPs: + writer.begintag("hexdata") + writer.newline() + writer.dumphex(gmapData) + writer.endtag("hexdata") + writer.newline() + writer.endtag("GMAPs") + writer.newline() + + writer.begintag("glyphlets") + writer.newline() + for glyphletData in self.glyphlets: + writer.begintag("hexdata") + writer.newline() + writer.dumphex(glyphletData) + writer.endtag("hexdata") + writer.newline() + writer.endtag("glyphlets") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "GMAPs": + if not hasattr(self, "GMAPs"): + self.GMAPs = [] + for element in content: + if isinstance(element, str): + continue + itemName, itemAttrs, itemContent = element + if itemName == "hexdata": + self.GMAPs.append(readHex(itemContent)) + elif name == "glyphlets": + if not hasattr(self, "glyphlets"): + self.glyphlets = [] + for element in content: + if isinstance(element, str): + continue + itemName, itemAttrs, itemContent = element + if itemName == "hexdata": + self.glyphlets.append(readHex(itemContent)) + else: + setattr(self, name, safeEval(attrs["value"])) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G_P_O_S_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G_P_O_S_.py new file mode 100644 index 0000000000000000000000000000000000000000..ca8290bab440e31196dd009c5125e022a079d7af --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G_P_O_S_.py @@ -0,0 +1,5 @@ +from .otBase import BaseTTXConverter + + +class table_G_P_O_S_(BaseTTXConverter): + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G__l_a_t.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G__l_a_t.py new file mode 100644 index 0000000000000000000000000000000000000000..f1dfdaa031efa4cf733d31e7959cd906f2e4087c --- 
/dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/G__l_a_t.py @@ -0,0 +1,234 @@ +from fontTools.misc import sstruct +from fontTools.misc.fixedTools import floatToFixedToStr +from fontTools.misc.textTools import safeEval + +# from itertools import * +from functools import partial +from . import DefaultTable +from . import grUtils +import struct + + +Glat_format_0 = """ + > # big endian + version: 16.16F +""" + +Glat_format_3 = """ + > + version: 16.16F + compression:L # compression scheme or reserved +""" + +Glat_format_1_entry = """ + > + attNum: B # Attribute number of first attribute + num: B # Number of attributes in this run +""" +Glat_format_23_entry = """ + > + attNum: H # Attribute number of first attribute + num: H # Number of attributes in this run +""" + +Glat_format_3_octabox_metrics = """ + > + subboxBitmap: H # Which subboxes exist on 4x4 grid + diagNegMin: B # Defines minimum negatively-sloped diagonal (si) + diagNegMax: B # Defines maximum negatively-sloped diagonal (sa) + diagPosMin: B # Defines minimum positively-sloped diagonal (di) + diagPosMax: B # Defines maximum positively-sloped diagonal (da) +""" + +Glat_format_3_subbox_entry = """ + > + left: B # xi + right: B # xa + bottom: B # yi + top: B # ya + diagNegMin: B # Defines minimum negatively-sloped diagonal (si) + diagNegMax: B # Defines maximum negatively-sloped diagonal (sa) + diagPosMin: B # Defines minimum positively-sloped diagonal (di) + diagPosMax: B # Defines maximum positively-sloped diagonal (da) +""" + + +class _Object: + pass + + +class _Dict(dict): + pass + + +class table_G__l_a_t(DefaultTable.DefaultTable): + """ + Support Graphite Glat tables + """ + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.scheme = 0 + + def decompile(self, data, ttFont): + sstruct.unpack2(Glat_format_0, data, self) + self.version = float(floatToFixedToStr(self.version, precisionBits=16)) + if self.version <= 1.9: + decoder = 
partial(self.decompileAttributes12, fmt=Glat_format_1_entry) + elif self.version <= 2.9: + decoder = partial(self.decompileAttributes12, fmt=Glat_format_23_entry) + elif self.version >= 3.0: + (data, self.scheme) = grUtils.decompress(data) + sstruct.unpack2(Glat_format_3, data, self) + self.hasOctaboxes = (self.compression & 1) == 1 + decoder = self.decompileAttributes3 + + gloc = ttFont["Gloc"] + self.attributes = {} + count = 0 + for s, e in zip(gloc, gloc[1:]): + self.attributes[ttFont.getGlyphName(count)] = decoder(data[s:e]) + count += 1 + + def decompileAttributes12(self, data, fmt): + attributes = _Dict() + while len(data) > 3: + e, data = sstruct.unpack2(fmt, data, _Object()) + keys = range(e.attNum, e.attNum + e.num) + if len(data) >= 2 * e.num: + vals = struct.unpack_from((">%dh" % e.num), data) + attributes.update(zip(keys, vals)) + data = data[2 * e.num :] + return attributes + + def decompileAttributes3(self, data): + if self.hasOctaboxes: + o, data = sstruct.unpack2(Glat_format_3_octabox_metrics, data, _Object()) + numsub = bin(o.subboxBitmap).count("1") + o.subboxes = [] + for b in range(numsub): + if len(data) >= 8: + subbox, data = sstruct.unpack2( + Glat_format_3_subbox_entry, data, _Object() + ) + o.subboxes.append(subbox) + attrs = self.decompileAttributes12(data, Glat_format_23_entry) + if self.hasOctaboxes: + attrs.octabox = o + return attrs + + def compile(self, ttFont): + data = sstruct.pack(Glat_format_0, self) + if self.version <= 1.9: + encoder = partial(self.compileAttributes12, fmt=Glat_format_1_entry) + elif self.version <= 2.9: + encoder = partial(self.compileAttributes12, fmt=Glat_format_1_entry) + elif self.version >= 3.0: + self.compression = (self.scheme << 27) + (1 if self.hasOctaboxes else 0) + data = sstruct.pack(Glat_format_3, self) + encoder = self.compileAttributes3 + + glocs = [] + for n in range(len(self.attributes)): + glocs.append(len(data)) + data += encoder(self.attributes[ttFont.getGlyphName(n)]) + 
glocs.append(len(data)) + ttFont["Gloc"].set(glocs) + + if self.version >= 3.0: + data = grUtils.compress(self.scheme, data) + return data + + def compileAttributes12(self, attrs, fmt): + data = b"" + for e in grUtils.entries(attrs): + data += sstruct.pack(fmt, {"attNum": e[0], "num": e[1]}) + struct.pack( + (">%dh" % len(e[2])), *e[2] + ) + return data + + def compileAttributes3(self, attrs): + if self.hasOctaboxes: + o = attrs.octabox + data = sstruct.pack(Glat_format_3_octabox_metrics, o) + numsub = bin(o.subboxBitmap).count("1") + for b in range(numsub): + data += sstruct.pack(Glat_format_3_subbox_entry, o.subboxes[b]) + else: + data = "" + return data + self.compileAttributes12(attrs, Glat_format_23_entry) + + def toXML(self, writer, ttFont): + writer.simpletag("version", version=self.version, compressionScheme=self.scheme) + writer.newline() + for n, a in sorted( + self.attributes.items(), key=lambda x: ttFont.getGlyphID(x[0]) + ): + writer.begintag("glyph", name=n) + writer.newline() + if hasattr(a, "octabox"): + o = a.octabox + formatstring, names, fixes = sstruct.getformat( + Glat_format_3_octabox_metrics + ) + vals = {} + for k in names: + if k == "subboxBitmap": + continue + vals[k] = "{:.3f}%".format(getattr(o, k) * 100.0 / 255) + vals["bitmap"] = "{:0X}".format(o.subboxBitmap) + writer.begintag("octaboxes", **vals) + writer.newline() + formatstring, names, fixes = sstruct.getformat( + Glat_format_3_subbox_entry + ) + for s in o.subboxes: + vals = {} + for k in names: + vals[k] = "{:.3f}%".format(getattr(s, k) * 100.0 / 255) + writer.simpletag("octabox", **vals) + writer.newline() + writer.endtag("octaboxes") + writer.newline() + for k, v in sorted(a.items()): + writer.simpletag("attribute", index=k, value=v) + writer.newline() + writer.endtag("glyph") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "version": + self.version = float(safeEval(attrs["version"])) + self.scheme = int(safeEval(attrs["compressionScheme"])) 
+ if name != "glyph": + return + if not hasattr(self, "attributes"): + self.attributes = {} + gname = attrs["name"] + attributes = _Dict() + for element in content: + if not isinstance(element, tuple): + continue + tag, attrs, subcontent = element + if tag == "attribute": + k = int(safeEval(attrs["index"])) + v = int(safeEval(attrs["value"])) + attributes[k] = v + elif tag == "octaboxes": + self.hasOctaboxes = True + o = _Object() + o.subboxBitmap = int(attrs["bitmap"], 16) + o.subboxes = [] + del attrs["bitmap"] + for k, v in attrs.items(): + setattr(o, k, int(float(v[:-1]) * 255.0 / 100.0 + 0.5)) + for element in subcontent: + if not isinstance(element, tuple): + continue + (tag, attrs, subcontent) = element + so = _Object() + for k, v in attrs.items(): + setattr(so, k, int(float(v[:-1]) * 255.0 / 100.0 + 0.5)) + o.subboxes.append(so) + attributes.octabox = o + self.attributes[gname] = attributes diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/J_S_T_F_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/J_S_T_F_.py new file mode 100644 index 0000000000000000000000000000000000000000..111c700710e56f1f92703b212b530267313293ba --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/J_S_T_F_.py @@ -0,0 +1,5 @@ +from .otBase import BaseTTXConverter + + +class table_J_S_T_F_(BaseTTXConverter): + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/S_I_N_G_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/S_I_N_G_.py new file mode 100644 index 0000000000000000000000000000000000000000..4522c06c6bdd09a7986894dee1a98790ab352318 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/S_I_N_G_.py @@ -0,0 +1,92 @@ +from fontTools.misc import sstruct +from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval +from . 
import DefaultTable + +SINGFormat = """ + > # big endian + tableVersionMajor: H + tableVersionMinor: H + glyphletVersion: H + permissions: h + mainGID: H + unitsPerEm: H + vertAdvance: h + vertOrigin: h + uniqueName: 28s + METAMD5: 16s + nameLength: 1s +""" +# baseGlyphName is a byte string which follows the record above. + + +class table_S_I_N_G_(DefaultTable.DefaultTable): + dependencies = [] + + def decompile(self, data, ttFont): + dummy, rest = sstruct.unpack2(SINGFormat, data, self) + self.uniqueName = self.decompileUniqueName(self.uniqueName) + self.nameLength = byteord(self.nameLength) + assert len(rest) == self.nameLength + self.baseGlyphName = tostr(rest) + + rawMETAMD5 = self.METAMD5 + self.METAMD5 = "[" + hex(byteord(self.METAMD5[0])) + for char in rawMETAMD5[1:]: + self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char)) + self.METAMD5 = self.METAMD5 + "]" + + def decompileUniqueName(self, data): + name = "" + for char in data: + val = byteord(char) + if val == 0: + break + if (val > 31) or (val < 128): + name += chr(val) + else: + octString = oct(val) + if len(octString) > 3: + octString = octString[1:] # chop off that leading zero. 
+ elif len(octString) < 3: + octString.zfill(3) + name += "\\" + octString + return name + + def compile(self, ttFont): + d = self.__dict__.copy() + d["nameLength"] = bytechr(len(self.baseGlyphName)) + d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28) + METAMD5List = eval(self.METAMD5) + d["METAMD5"] = b"" + for val in METAMD5List: + d["METAMD5"] += bytechr(val) + assert len(d["METAMD5"]) == 16, "Failed to pack 16 byte MD5 hash in SING table" + data = sstruct.pack(SINGFormat, d) + data = data + tobytes(self.baseGlyphName) + return data + + def compilecompileUniqueName(self, name, length): + nameLen = len(name) + if length <= nameLen: + name = name[: length - 1] + "\000" + else: + name += (nameLen - length) * "\000" + return name + + def toXML(self, writer, ttFont): + writer.comment("Most of this table will be recalculated by the compiler") + writer.newline() + formatstring, names, fixes = sstruct.getformat(SINGFormat) + for name in names: + value = getattr(self, name) + writer.simpletag(name, value=value) + writer.newline() + writer.simpletag("baseGlyphName", value=self.baseGlyphName) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + value = attrs["value"] + if name in ["uniqueName", "METAMD5", "baseGlyphName"]: + setattr(self, name, value) + else: + setattr(self, name, safeEval(value)) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/S_T_A_T_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/S_T_A_T_.py new file mode 100644 index 0000000000000000000000000000000000000000..1769de91b5f0416354e040b52e3615c6824fd2f9 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/S_T_A_T_.py @@ -0,0 +1,5 @@ +from .otBase import BaseTTXConverter + + +class table_S_T_A_T_(BaseTTXConverter): + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_B_.py 
b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_B_.py new file mode 100644 index 0000000000000000000000000000000000000000..8a6c14c444595508c35bdc6ebace60b4bbbbdaba --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_B_.py @@ -0,0 +1,5 @@ +from .T_S_I_V_ import table_T_S_I_V_ + + +class table_T_S_I_B_(table_T_S_I_V_): + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_D_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_D_.py new file mode 100644 index 0000000000000000000000000000000000000000..536ff2f98a0abb8b27fe6da44199534a32fd0c3e --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_D_.py @@ -0,0 +1,5 @@ +from .T_S_I_V_ import table_T_S_I_V_ + + +class table_T_S_I_D_(table_T_S_I_V_): + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_P_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_P_.py new file mode 100644 index 0000000000000000000000000000000000000000..1abc02590c240377177d4ac12fe4848720e24959 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_P_.py @@ -0,0 +1,5 @@ +from .T_S_I_V_ import table_T_S_I_V_ + + +class table_T_S_I_P_(table_T_S_I_V_): + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__1.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__1.py new file mode 100644 index 0000000000000000000000000000000000000000..a9d04a09b027dcba994d576a33676bc37b18565f --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__1.py @@ -0,0 +1,164 @@ +""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) +tool to store its hinting source data. 
+ +TSI1 contains the text of the glyph programs in the form of low-level assembly +code, as well as the 'extra' programs 'fpgm', 'ppgm' (i.e. 'prep'), and 'cvt'. +""" + +from . import DefaultTable +from fontTools.misc.loggingTools import LogMixin +from fontTools.misc.textTools import strjoin, tobytes, tostr + + +class table_T_S_I__1(LogMixin, DefaultTable.DefaultTable): + extras = {0xFFFA: "ppgm", 0xFFFB: "cvt", 0xFFFC: "reserved", 0xFFFD: "fpgm"} + + indextable = "TSI0" + + def decompile(self, data, ttFont): + totalLength = len(data) + indextable = ttFont[self.indextable] + for indices, isExtra in zip( + (indextable.indices, indextable.extra_indices), (False, True) + ): + programs = {} + for i, (glyphID, textLength, textOffset) in enumerate(indices): + if isExtra: + name = self.extras[glyphID] + else: + name = ttFont.getGlyphName(glyphID) + if textOffset > totalLength: + self.log.warning("textOffset > totalLength; %r skipped" % name) + continue + if textLength < 0x8000: + # If the length stored in the record is less than 32768, then use + # that as the length of the record. + pass + elif textLength == 0x8000: + # If the length is 32768, compute the actual length as follows: + isLast = i == (len(indices) - 1) + if isLast: + if isExtra: + # For the last "extra" record (the very last record of the + # table), the length is the difference between the total + # length of the TSI1 table and the textOffset of the final + # record. + nextTextOffset = totalLength + else: + # For the last "normal" record (the last record just prior + # to the record containing the "magic number"), the length + # is the difference between the textOffset of the record + # following the "magic number" (0xFFFE) record (i.e. the + # first "extra" record), and the textOffset of the last + # "normal" record. 
+ nextTextOffset = indextable.extra_indices[0][2] + else: + # For all other records with a length of 0x8000, the length is + # the difference between the textOffset of the record in + # question and the textOffset of the next record. + nextTextOffset = indices[i + 1][2] + assert nextTextOffset >= textOffset, "entries not sorted by offset" + if nextTextOffset > totalLength: + self.log.warning( + "nextTextOffset > totalLength; %r truncated" % name + ) + nextTextOffset = totalLength + textLength = nextTextOffset - textOffset + else: + from fontTools import ttLib + + raise ttLib.TTLibError( + "%r textLength (%d) must not be > 32768" % (name, textLength) + ) + text = data[textOffset : textOffset + textLength] + assert len(text) == textLength + text = tostr(text, encoding="utf-8") + if text: + programs[name] = text + if isExtra: + self.extraPrograms = programs + else: + self.glyphPrograms = programs + + def compile(self, ttFont): + if not hasattr(self, "glyphPrograms"): + self.glyphPrograms = {} + self.extraPrograms = {} + data = b"" + indextable = ttFont[self.indextable] + glyphNames = ttFont.getGlyphOrder() + + indices = [] + for i in range(len(glyphNames)): + if len(data) % 2: + data = ( + data + b"\015" + ) # align on 2-byte boundaries, fill with return chars. Yum. + name = glyphNames[i] + if name in self.glyphPrograms: + text = tobytes(self.glyphPrograms[name], encoding="utf-8") + else: + text = b"" + textLength = len(text) + if textLength >= 0x8000: + textLength = 0x8000 + indices.append((i, textLength, len(data))) + data = data + text + + extra_indices = [] + codes = sorted(self.extras.items()) + for i in range(len(codes)): + if len(data) % 2: + data = ( + data + b"\015" + ) # align on 2-byte boundaries, fill with return chars. 
+ code, name = codes[i] + if name in self.extraPrograms: + text = tobytes(self.extraPrograms[name], encoding="utf-8") + else: + text = b"" + textLength = len(text) + if textLength >= 0x8000: + textLength = 0x8000 + extra_indices.append((code, textLength, len(data))) + data = data + text + indextable.set(indices, extra_indices) + return data + + def toXML(self, writer, ttFont): + names = sorted(self.glyphPrograms.keys()) + writer.newline() + for name in names: + text = self.glyphPrograms[name] + if not text: + continue + writer.begintag("glyphProgram", name=name) + writer.newline() + writer.write_noindent(text.replace("\r", "\n")) + writer.newline() + writer.endtag("glyphProgram") + writer.newline() + writer.newline() + extra_names = sorted(self.extraPrograms.keys()) + for name in extra_names: + text = self.extraPrograms[name] + if not text: + continue + writer.begintag("extraProgram", name=name) + writer.newline() + writer.write_noindent(text.replace("\r", "\n")) + writer.newline() + writer.endtag("extraProgram") + writer.newline() + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "glyphPrograms"): + self.glyphPrograms = {} + self.extraPrograms = {} + lines = strjoin(content).replace("\r", "\n").split("\n") + text = "\r".join(lines[1:-1]) + if name == "glyphProgram": + self.glyphPrograms[attrs["name"]] = text + elif name == "extraProgram": + self.extraPrograms[attrs["name"]] = text diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__5.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__5.py new file mode 100644 index 0000000000000000000000000000000000000000..d86798695ca91528043a598f855222f990dc8cc6 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__5.py @@ -0,0 +1,47 @@ +""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) +tool to store its hinting source data. 
+ +TSI5 contains the VTT character groups. +""" + +from fontTools.misc.textTools import safeEval +from . import DefaultTable +import sys +import array + + +class table_T_S_I__5(DefaultTable.DefaultTable): + def decompile(self, data, ttFont): + numGlyphs = ttFont["maxp"].numGlyphs + assert len(data) == 2 * numGlyphs + a = array.array("H") + a.frombytes(data) + if sys.byteorder != "big": + a.byteswap() + self.glyphGrouping = {} + for i in range(numGlyphs): + self.glyphGrouping[ttFont.getGlyphName(i)] = a[i] + + def compile(self, ttFont): + glyphNames = ttFont.getGlyphOrder() + a = array.array("H") + for i in range(len(glyphNames)): + a.append(self.glyphGrouping.get(glyphNames[i], 0)) + if sys.byteorder != "big": + a.byteswap() + return a.tobytes() + + def toXML(self, writer, ttFont): + names = sorted(self.glyphGrouping.keys()) + for glyphName in names: + writer.simpletag( + "glyphgroup", name=glyphName, value=self.glyphGrouping[glyphName] + ) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "glyphGrouping"): + self.glyphGrouping = {} + if name != "glyphgroup": + return + self.glyphGrouping[attrs["name"]] = safeEval(attrs["value"]) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/TupleVariation.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/TupleVariation.py new file mode 100644 index 0000000000000000000000000000000000000000..a98bca2e0e117fb0c8c4ec07e9e0977428652d92 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/TupleVariation.py @@ -0,0 +1,846 @@ +from fontTools.misc.fixedTools import ( + fixedToFloat as fi2fl, + floatToFixed as fl2fi, + floatToFixedToStr as fl2str, + strToFixedToFloat as str2fl, + otRound, +) +from fontTools.misc.textTools import safeEval +import array +from collections import Counter, defaultdict +import io +import logging +import struct +import sys + + +# 
https://www.microsoft.com/typography/otspec/otvarcommonformats.htm + +EMBEDDED_PEAK_TUPLE = 0x8000 +INTERMEDIATE_REGION = 0x4000 +PRIVATE_POINT_NUMBERS = 0x2000 + +DELTAS_ARE_ZERO = 0x80 +DELTAS_ARE_WORDS = 0x40 +DELTAS_ARE_LONGS = 0xC0 +DELTAS_SIZE_MASK = 0xC0 +DELTA_RUN_COUNT_MASK = 0x3F + +POINTS_ARE_WORDS = 0x80 +POINT_RUN_COUNT_MASK = 0x7F + +TUPLES_SHARE_POINT_NUMBERS = 0x8000 +TUPLE_COUNT_MASK = 0x0FFF +TUPLE_INDEX_MASK = 0x0FFF + +log = logging.getLogger(__name__) + + +class TupleVariation(object): + def __init__(self, axes, coordinates): + self.axes = axes.copy() + self.coordinates = list(coordinates) + + def __repr__(self): + axes = ",".join( + sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()]) + ) + return "" % (axes, self.coordinates) + + def __eq__(self, other): + return self.coordinates == other.coordinates and self.axes == other.axes + + def getUsedPoints(self): + # Empty set means "all points used". + if None not in self.coordinates: + return frozenset() + used = frozenset([i for i, p in enumerate(self.coordinates) if p is not None]) + # Return None if no points used. + return used if used else None + + def hasImpact(self): + """Returns True if this TupleVariation has any visible impact. + + If the result is False, the TupleVariation can be omitted from the font + without making any visible difference. 
    def toXML(self, writer, axisTags):
        """Write this variation as a TTX ``<tuple>`` element.

        Axes whose (min, value, max) region equals the default implied by
        the peak get a bare ``value`` attribute; otherwise min/max are
        written explicitly.  Deltas are emitted per point (gvar style,
        2-tuples) or per CVT index (cvar style, ints).
        """
        writer.begintag("tuple")
        writer.newline()
        for axis in axisTags:
            value = self.axes.get(axis)
            if value is not None:
                minValue, value, maxValue = value
                defaultMinValue = min(value, 0.0)  # -0.3 --> -0.3; 0.7 --> 0.0
                defaultMaxValue = max(value, 0.0)  # -0.3 --> 0.0; 0.7 --> 0.7
                if minValue == defaultMinValue and maxValue == defaultMaxValue:
                    writer.simpletag("coord", axis=axis, value=fl2str(value, 14))
                else:
                    attrs = [
                        ("axis", axis),
                        ("min", fl2str(minValue, 14)),
                        ("value", fl2str(value, 14)),
                        ("max", fl2str(maxValue, 14)),
                    ]
                    writer.simpletag("coord", attrs)
                writer.newline()
        wrote_any_deltas = False
        for i, delta in enumerate(self.coordinates):
            # (x, y) tuple: point delta (gvar); int: CVT delta (cvar).
            if type(delta) == tuple and len(delta) == 2:
                writer.simpletag("delta", pt=i, x=delta[0], y=delta[1])
                writer.newline()
                wrote_any_deltas = True
            elif type(delta) == int:
                writer.simpletag("delta", cvt=i, value=delta)
                writer.newline()
                wrote_any_deltas = True
            elif delta is not None:
                # Unexpected delta type: keep going but leave a marker in the XML.
                log.error("bad delta format")
                writer.comment("bad delta #%d" % i)
                writer.newline()
                wrote_any_deltas = True
        if not wrote_any_deltas:
            writer.comment("no deltas")
            writer.newline()
        writer.endtag("tuple")
        writer.newline()
    def compile(self, axisTags, sharedCoordIndices={}, pointData=None):
        """Compile this variation to binary.

        Returns a ``(tupleData, auxData)`` pair: ``tupleData`` is the
        TupleVariationHeader (size, flags, optional embedded peak and
        intermediate coords), ``auxData`` is the private point numbers
        (if any) followed by the packed deltas.  Returns ``(b"", b"")``
        when the variation affects no points.

        ``sharedCoordIndices`` maps compiled peak coords to shared-tuple
        indices.  NOTE(review): the mutable default dict is only ever
        read (via ``.get``), never mutated, so the shared default is safe.
        """
        assert set(self.axes.keys()) <= set(axisTags), (
            "Unknown axis tag found.",
            self.axes.keys(),
            axisTags,
        )

        tupleData = []
        auxData = []

        if pointData is None:
            usedPoints = self.getUsedPoints()
            if usedPoints is None:  # Nothing to encode
                return b"", b""
            pointData = self.compilePoints(usedPoints)

        # Use a shared peak tuple if available; otherwise embed the peak.
        coord = self.compileCoord(axisTags)
        flags = sharedCoordIndices.get(coord)
        if flags is None:
            flags = EMBEDDED_PEAK_TUPLE
            tupleData.append(coord)

        intermediateCoord = self.compileIntermediateCoord(axisTags)
        if intermediateCoord is not None:
            flags |= INTERMEDIATE_REGION
            tupleData.append(intermediateCoord)

        # pointData of b'' implies "use shared points".
        if pointData:
            flags |= PRIVATE_POINT_NUMBERS
            auxData.append(pointData)

        auxData.append(self.compileDeltas())
        auxData = b"".join(auxData)

        # Header starts with the serialized-data size and the flags word.
        tupleData.insert(0, struct.pack(">HH", len(auxData), flags))
        return b"".join(tupleData), auxData
    @staticmethod
    def compilePoints(points):
        """Pack a set of point indices into the 'gvar' packed-point format.

        Returns a bytearray: a 1- or 2-byte total count followed by
        delta-encoded runs of point numbers (byte- or word-sized per run).
        """
        # If the set consists of all points in the glyph, it gets encoded with
        # a special encoding: a single zero byte.
        #
        # To use this optimization, points passed in must be empty set.
        # The following two lines are not strictly necessary as the main code
        # below would emit the same. But this is most common and faster.
        if not points:
            return b"\0"

        # In the 'gvar' table, the packing of point numbers is a little surprising.
        # It consists of multiple runs, each being a delta-encoded list of integers.
        # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as
        # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1.
        # There are two types of runs, with values being either 8 or 16 bit unsigned
        # integers.
        points = list(points)
        points.sort()
        numPoints = len(points)

        result = bytearray()
        # The binary representation starts with the total number of points in the set,
        # encoded into one or two bytes depending on the value.
        if numPoints < 0x80:
            result.append(numPoints)
        else:
            result.append((numPoints >> 8) | 0x80)
            result.append(numPoints & 0xFF)

        MAX_RUN_LENGTH = 127
        pos = 0
        lastValue = 0
        while pos < numPoints:
            runLength = 0

            # Reserve one byte for the run header; it is backpatched below
            # once the run length and encoding width are known.
            headerPos = len(result)
            result.append(0)

            useByteEncoding = None
            while pos < numPoints and runLength <= MAX_RUN_LENGTH:
                curValue = points[pos]
                delta = curValue - lastValue
                if useByteEncoding is None:
                    # First delta of the run decides the run's width.
                    useByteEncoding = 0 <= delta <= 0xFF
                if useByteEncoding and (delta > 0xFF or delta < 0):
                    # we need to start a new run (which will not use byte encoding)
                    break
                # TODO This never switches back to a byte-encoding from a short-encoding.
                # That's suboptimal.
                if useByteEncoding:
                    result.append(delta)
                else:
                    result.append(delta >> 8)
                    result.append(delta & 0xFF)
                lastValue = curValue
                pos += 1
                runLength += 1
            if useByteEncoding:
                result[headerPos] = runLength - 1
            else:
                result[headerPos] = (runLength - 1) | POINTS_ARE_WORDS

        return result
to absolute + absolute = [] + current = 0 + for delta in result: + current += delta + absolute.append(current) + result = absolute + del absolute + + badPoints = {str(p) for p in result if p < 0 or p >= numPoints} + if badPoints: + log.warning( + "point %s out of range in '%s' table" + % (",".join(sorted(badPoints)), tableTag) + ) + return (result, pos) + + def compileDeltas(self): + deltaX = [] + deltaY = [] + if self.getCoordWidth() == 2: + for c in self.coordinates: + if c is None: + continue + deltaX.append(c[0]) + deltaY.append(c[1]) + else: + for c in self.coordinates: + if c is None: + continue + deltaX.append(c) + bytearr = bytearray() + self.compileDeltaValues_(deltaX, bytearr) + self.compileDeltaValues_(deltaY, bytearr) + return bytearr + + @staticmethod + def compileDeltaValues_(deltas, bytearr=None): + """[value1, value2, value3, ...] --> bytearray + + Emits a sequence of runs. Each run starts with a + byte-sized header whose 6 least significant bits + (header & 0x3F) indicate how many values are encoded + in this run. The stored length is the actual length + minus one; run lengths are thus in the range [1..64]. + If the header byte has its most significant bit (0x80) + set, all values in this run are zero, and no data + follows. Otherwise, the header byte is followed by + ((header & 0x3F) + 1) signed values. If (header & + 0x40) is clear, the delta values are stored as signed + bytes; if (header & 0x40) is set, the delta values are + signed 16-bit integers. + """ # Explaining the format because the 'gvar' spec is hard to understand. 
+ if bytearr is None: + bytearr = bytearray() + pos = 0 + numDeltas = len(deltas) + while pos < numDeltas: + value = deltas[pos] + if value == 0: + pos = TupleVariation.encodeDeltaRunAsZeroes_(deltas, pos, bytearr) + elif -128 <= value <= 127: + pos = TupleVariation.encodeDeltaRunAsBytes_(deltas, pos, bytearr) + elif -32768 <= value <= 32767: + pos = TupleVariation.encodeDeltaRunAsWords_(deltas, pos, bytearr) + else: + pos = TupleVariation.encodeDeltaRunAsLongs_(deltas, pos, bytearr) + return bytearr + + @staticmethod + def encodeDeltaRunAsZeroes_(deltas, offset, bytearr): + pos = offset + numDeltas = len(deltas) + while pos < numDeltas and deltas[pos] == 0: + pos += 1 + runLength = pos - offset + while runLength >= 64: + bytearr.append(DELTAS_ARE_ZERO | 63) + runLength -= 64 + if runLength: + bytearr.append(DELTAS_ARE_ZERO | (runLength - 1)) + return pos + + @staticmethod + def encodeDeltaRunAsBytes_(deltas, offset, bytearr): + pos = offset + numDeltas = len(deltas) + while pos < numDeltas: + value = deltas[pos] + if not (-128 <= value <= 127): + break + # Within a byte-encoded run of deltas, a single zero + # is best stored literally as 0x00 value. However, + # if are two or more zeroes in a sequence, it is + # better to start a new run. For example, the sequence + # of deltas [15, 15, 0, 15, 15] becomes 6 bytes + # (04 0F 0F 00 0F 0F) when storing the zero value + # literally, but 7 bytes (01 0F 0F 80 01 0F 0F) + # when starting a new run. 
    @staticmethod
    def encodeDeltaRunAsWords_(deltas, offset, bytearr):
        """Encode a run of word-sized (int16) deltas starting at *offset*.

        The run ends at a zero value, at a pair of consecutive byte-sized
        values (cheaper as a byte run), or at a value outside the int16
        range (handled by the long encoding).  Returns the position just
        past the last value consumed.
        """
        pos = offset
        numDeltas = len(deltas)
        while pos < numDeltas:
            value = deltas[pos]

            # Within a word-encoded run of deltas, it is easiest
            # to start a new run (with a different encoding)
            # whenever we encounter a zero value. For example,
            # the sequence [0x6666, 0, 0x7777] needs 7 bytes when
            # storing the zero literally (42 66 66 00 00 77 77),
            # and equally 7 bytes when starting a new run
            # (40 66 66 80 40 77 77).
            if value == 0:
                break

            # Within a word-encoded run of deltas, a single value
            # in the range (-128..127) should be encoded literally
            # because it is more compact. For example, the sequence
            # [0x6666, 2, 0x7777] becomes 7 bytes when storing
            # the value literally (42 66 66 00 02 77 77), but 8 bytes
            # when starting a new run (40 66 66 00 02 40 77 77).
            if (
                (-128 <= value <= 127)
                and pos + 1 < numDeltas
                and (-128 <= deltas[pos + 1] <= 127)
            ):
                break

            # Value needs the long (int32) encoding: end the word run here.
            if not (-32768 <= value <= 32767):
                break

            pos += 1
        runLength = pos - offset
        # A single header encodes at most 64 values; split longer runs.
        while runLength >= 64:
            bytearr.append(DELTAS_ARE_WORDS | 63)
            a = array.array("h", deltas[offset : offset + 64])
            if sys.byteorder != "big":
                a.byteswap()  # payload is always big-endian
            bytearr.extend(a)
            offset += 64
            runLength -= 64
        if runLength:
            bytearr.append(DELTAS_ARE_WORDS | (runLength - 1))
            a = array.array("h", deltas[offset:pos])
            if sys.byteorder != "big":
                a.byteswap()
            bytearr.extend(a)
        return pos
deltas.frombytes(data[pos : pos + deltasSize]) + if sys.byteorder != "big": + deltas.byteswap() + assert len(deltas) == numDeltasInRun, (len(deltas), numDeltasInRun) + pos += deltasSize + result.extend(deltas) + assert numDeltas is None or len(result) == numDeltas + return (result, pos) + + @staticmethod + def getTupleSize_(flags, axisCount): + size = 4 + if (flags & EMBEDDED_PEAK_TUPLE) != 0: + size += axisCount * 2 + if (flags & INTERMEDIATE_REGION) != 0: + size += axisCount * 4 + return size + + def getCoordWidth(self): + """Return 2 if coordinates are (x, y) as in gvar, 1 if single values + as in cvar, or 0 if empty. + """ + firstDelta = next((c for c in self.coordinates if c is not None), None) + if firstDelta is None: + return 0 # empty or has no impact + if type(firstDelta) in (int, float): + return 1 + if type(firstDelta) is tuple and len(firstDelta) == 2: + return 2 + raise TypeError( + "invalid type of delta; expected (int or float) number, or " + "Tuple[number, number]: %r" % firstDelta + ) + + def scaleDeltas(self, scalar): + if scalar == 1.0: + return # no change + coordWidth = self.getCoordWidth() + self.coordinates = [ + ( + None + if d is None + else d * scalar if coordWidth == 1 else (d[0] * scalar, d[1] * scalar) + ) + for d in self.coordinates + ] + + def roundDeltas(self): + coordWidth = self.getCoordWidth() + self.coordinates = [ + ( + None + if d is None + else otRound(d) if coordWidth == 1 else (otRound(d[0]), otRound(d[1])) + ) + for d in self.coordinates + ] + + def calcInferredDeltas(self, origCoords, endPts): + from fontTools.varLib.iup import iup_delta + + if self.getCoordWidth() == 1: + raise TypeError("Only 'gvar' TupleVariation can have inferred deltas") + if None in self.coordinates: + if len(self.coordinates) != len(origCoords): + raise ValueError( + "Expected len(origCoords) == %d; found %d" + % (len(self.coordinates), len(origCoords)) + ) + self.coordinates = iup_delta(self.coordinates, origCoords, endPts) + + def optimize(self, 
    def __imul__(self, scalar):
        """Implement ``var *= scalar``: scale all deltas in place."""
        self.scaleDeltas(scalar)
        return self
def compileSharedTuples(
    axisTags, variations, MAX_NUM_SHARED_COORDS=TUPLE_INDEX_MASK + 1
):
    """Return the compiled peak tuples worth sharing across variations.

    A peak coordinate qualifies only if it occurs in more than one
    variation; at most MAX_NUM_SHARED_COORDS tuples are returned.
    """
    frequency = Counter(var.compileCoord(axisTags) for var in variations)
    # In python < 3.7, most_common() ordering is non-deterministic,
    # so sort by (count descending, coord bytes) for consistent output.
    ranked = sorted(
        frequency.most_common(MAX_NUM_SHARED_COORDS),
        key=lambda item: (-item[1], item[0]),
    )
    return [coord for coord, count in ranked if count > 1]
+ del pointCount + newVariations = [] + pointDatas = [] + # Compile all points and figure out sharing if desired + sharedPoints = None + + # Collect, count, and compile point-sets for all variation sets + pointSetCount = defaultdict(int) + for v in variations: + points = v.getUsedPoints() + if points is None: # Empty variations + continue + pointSetCount[points] += 1 + newVariations.append(v) + pointDatas.append(points) + variations = newVariations + del newVariations + + if not variations: + return (0, b"", b"") + + n = len(variations[0].coordinates) + assert all( + len(v.coordinates) == n for v in variations + ), "Variation sets have different sizes" + + compiledPoints = { + pointSet: TupleVariation.compilePoints(pointSet) for pointSet in pointSetCount + } + + tupleVariationCount = len(variations) + tuples = [] + data = [] + + if useSharedPoints: + # Find point-set which saves most bytes. + def key(pn): + pointSet = pn[0] + count = pn[1] + return len(compiledPoints[pointSet]) * (count - 1) + + sharedPoints = max(pointSetCount.items(), key=key)[0] + + data.append(compiledPoints[sharedPoints]) + tupleVariationCount |= TUPLES_SHARE_POINT_NUMBERS + + # b'' implies "use shared points" + pointDatas = [ + compiledPoints[points] if points != sharedPoints else b"" + for points in pointDatas + ] + + for v, p in zip(variations, pointDatas): + thisTuple, thisData = v.compile(axisTags, sharedTupleIndices, pointData=p) + + tuples.append(thisTuple) + data.append(thisData) + + tuples = b"".join(tuples) + data = b"".join(data) + return tupleVariationCount, tuples, data + + +def decompileTupleVariationStore( + tableTag, + axisTags, + tupleVariationCount, + pointCount, + sharedTuples, + data, + pos, + dataPos, +): + numAxes = len(axisTags) + result = [] + if (tupleVariationCount & TUPLES_SHARE_POINT_NUMBERS) != 0: + sharedPoints, dataPos = TupleVariation.decompilePoints_( + pointCount, data, dataPos, tableTag + ) + else: + sharedPoints = [] + for _ in range(tupleVariationCount & 
def inferRegion_(peak):
    """Infer start and end coords for a (non-intermediate) region.

    Variation tuples whose INTERMEDIATE_REGION flag is not set in the
    TupleVariationHeader omit the intermediateStartTuple and
    intermediateEndTuple fields for compactness; their applicability
    region is implied by the peak alone.
    """
    # A negative peak spans from the peak up to 0, a positive one from 0
    # up to the peak:  -0.3 --> (-0.3, 0.0);  0.7 --> (0.0, 0.7).
    start = {axis: min(value, 0.0) for axis, value in peak.items()}
    end = {axis: max(value, 0.0) for axis, value in peak.items()}
    return (start, end)
b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/BitmapGlyphMetrics.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/D__e_b_g.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/D__e_b_g.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67579c415cdfc9573662f8a894ab9209cec93fc1 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/D__e_b_g.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/F__e_a_t.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/F__e_a_t.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ace6211431673546127b9321e207b02de8aeebe3 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/F__e_a_t.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G_P_K_G_.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G_P_K_G_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6982f03a5a79b2d085f34a186c6e48002c5245b4 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G_P_K_G_.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G_P_O_S_.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G_P_O_S_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8183951a3b3c4e68a23de609c35f07e19eefa462 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G_P_O_S_.cpython-310.pyc 
differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G_S_U_B_.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G_S_U_B_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1401ee0a9a7f60b9a0fb934995c6d4de1a310966 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G_S_U_B_.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G__l_a_t.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G__l_a_t.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f9ee56bf2753d386282df8ac14db0ae5570f470 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/G__l_a_t.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/J_S_T_F_.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/J_S_T_F_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11d409c1510fb166eb1cca6c9368b3b6f29f7b25 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/J_S_T_F_.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/L_T_S_H_.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/L_T_S_H_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83c26676086c7a6e216d92d0badb48838ab9301e Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/L_T_S_H_.cpython-310.pyc differ diff --git 
a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/M_V_A_R_.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/M_V_A_R_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..604db49dc4b72df2f91433581581378fb9920537 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/M_V_A_R_.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_J_.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_J_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4cbd19e79d1f7535a725d3408e290843041b3105 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_J_.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_S_.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_S_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0d9cbc1388296183c05a853b6536f8697efa93b Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I_S_.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__0.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__0.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66a4a6b99173524246eebfaec53b85545f261903 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__0.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__5.cpython-310.pyc 
b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__5.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20e19df329fd550c4fd316f6aa9d783f51c72d01 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_S_I__5.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_T_F_A_.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_T_F_A_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a962f0812fc4d7e1d5ae68d5147e3de4fa0ba734 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/T_T_F_A_.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/TupleVariation.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/TupleVariation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5be75fbae55787a36c450a039c75bc51f55645a5 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/TupleVariation.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_O_R_G_.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_O_R_G_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa5330173e96c8239672ef69b6f20b061d84e696 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_O_R_G_.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_V_A_R_.cpython-310.pyc 
b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_V_A_R_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..629f3f774f41abd705f41b09652179c3709474b8 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/V_V_A_R_.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_a_v_a_r.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_a_v_a_r.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa5f9d2f09abece3373e3dab06454d1def029893 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_a_v_a_r.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_m_a_p.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_m_a_p.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68e7c5b2937f04e36b65b19dd7af889ecc17655c Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_m_a_p.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_v_t.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_v_t.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad2aca70f54d67a23a4bf05e05b098e4a83e589f Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_c_v_t.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_g_a_s_p.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_g_a_s_p.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..2b9a8981add9059e271b8149eea64090adddca48 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_g_a_s_p.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_g_c_i_d.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_g_c_i_d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7a4c534dd0f4bc3ab2ec2a36304f04a469d66d8 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_g_c_i_d.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_l_c_a_r.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_l_c_a_r.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49c290017339853d3955522d1e050ec22a3a2948 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_l_c_a_r.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_l_t_a_g.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_l_t_a_g.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a27e9f12749f85f32a2b41a28a4643b023ee16e Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_l_t_a_g.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_m_e_t_a.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_m_e_t_a.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1a3182f548e70bed996d1c3339df74b5ffda37e Binary files /dev/null and 
b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_m_e_t_a.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_m_o_r_t.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_m_o_r_t.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94c9549bb5dacfd977f29ff53c61b5122e25e168 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_m_o_r_t.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_v_m_t_x.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_v_m_t_x.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..36cfd33173c79e491b14ee083486838a5d3a80d0 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/_v_m_t_x.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otConverters.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otConverters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7565e5c06bdd039c24bdc4417b8fbe9469d6a0a7 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otConverters.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otTables.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otTables.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fe40c7fd879d367ddf0611728f30411ad4d9114 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otTables.cpython-310.pyc 
differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/sbixGlyph.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/sbixGlyph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..624b6396da867f237f003c2b5586019b8a541c45 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/sbixGlyph.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/sbixStrike.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/sbixStrike.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de143d06f84440bb869bf4c5a98d9c74f0be0ea7 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/sbixStrike.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_b_s_l_n.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_b_s_l_n.py new file mode 100644 index 0000000000000000000000000000000000000000..8e266fa54d0f0fd05bfde372627e1fb948d6f0fd --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_b_s_l_n.py @@ -0,0 +1,6 @@ +from .otBase import BaseTTXConverter + + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6bsln.html +class table__b_s_l_n(BaseTTXConverter): + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_c_m_a_p.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_c_m_a_p.py new file mode 100644 index 0000000000000000000000000000000000000000..484c331cb77ac917bda77a01888f5736232c6dfa --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_c_m_a_p.py @@ -0,0 +1,1576 @@ +from fontTools.misc.textTools import bytesjoin, safeEval, readHex +from 
fontTools.misc.encodingTools import getEncoding +from fontTools.ttLib import getSearchRange +from fontTools.unicode import Unicode +from . import DefaultTable +import sys +import struct +import array +import logging + + +log = logging.getLogger(__name__) + + +def _make_map(font, chars, gids): + assert len(chars) == len(gids) + glyphNames = font.getGlyphNameMany(gids) + cmap = {} + for char, gid, name in zip(chars, gids, glyphNames): + if gid == 0: + continue + cmap[char] = name + return cmap + + +class table__c_m_a_p(DefaultTable.DefaultTable): + """Character to Glyph Index Mapping Table + + This class represents the `cmap `_ + table, which maps between input characters (in Unicode or other system encodings) + and glyphs within the font. The ``cmap`` table contains one or more subtables + which determine the mapping of of characters to glyphs across different platforms + and encoding systems. + + ``table__c_m_a_p`` objects expose an accessor ``.tables`` which provides access + to the subtables, although it is normally easier to retrieve individual subtables + through the utility methods described below. To add new subtables to a font, + first determine the subtable format (if in doubt use format 4 for glyphs within + the BMP, format 12 for glyphs outside the BMP, and format 14 for Unicode Variation + Sequences) construct subtable objects with ``CmapSubtable.newSubtable(format)``, + and append them to the ``.tables`` list. + + Within a subtable, the mapping of characters to glyphs is provided by the ``.cmap`` + attribute. + + Example:: + + cmap4_0_3 = CmapSubtable.newSubtable(4) + cmap4_0_3.platformID = 0 + cmap4_0_3.platEncID = 3 + cmap4_0_3.language = 0 + cmap4_0_3.cmap = { 0xC1: "Aacute" } + + cmap = newTable("cmap") + cmap.tableVersion = 0 + cmap.tables = [cmap4_0_3] + """ + + def getcmap(self, platformID, platEncID): + """Returns the first subtable which matches the given platform and encoding. + + Args: + platformID (int): The platform ID. 
Use 0 for Unicode, 1 for Macintosh + (deprecated for new fonts), 2 for ISO (deprecated) and 3 for Windows. + encodingID (int): Encoding ID. Interpretation depends on the platform ID. + See the OpenType specification for details. + + Returns: + An object which is a subclass of :py:class:`CmapSubtable` if a matching + subtable is found within the font, or ``None`` otherwise. + """ + + for subtable in self.tables: + if subtable.platformID == platformID and subtable.platEncID == platEncID: + return subtable + return None # not found + + def getBestCmap( + self, + cmapPreferences=( + (3, 10), + (0, 6), + (0, 4), + (3, 1), + (0, 3), + (0, 2), + (0, 1), + (0, 0), + ), + ): + """Returns the 'best' Unicode cmap dictionary available in the font + or ``None``, if no Unicode cmap subtable is available. + + By default it will search for the following (platformID, platEncID) + pairs in order:: + + (3, 10), # Windows Unicode full repertoire + (0, 6), # Unicode full repertoire (format 13 subtable) + (0, 4), # Unicode 2.0 full repertoire + (3, 1), # Windows Unicode BMP + (0, 3), # Unicode 2.0 BMP + (0, 2), # Unicode ISO/IEC 10646 + (0, 1), # Unicode 1.1 + (0, 0) # Unicode 1.0 + + This particular order matches what HarfBuzz uses to choose what + subtable to use by default. This order prefers the largest-repertoire + subtable, and among those, prefers the Windows-platform over the + Unicode-platform as the former has wider support. + + This order can be customized via the ``cmapPreferences`` argument. 
+ """ + for platformID, platEncID in cmapPreferences: + cmapSubtable = self.getcmap(platformID, platEncID) + if cmapSubtable is not None: + return cmapSubtable.cmap + return None # None of the requested cmap subtables were found + + def buildReversed(self): + """Builds a reverse mapping dictionary + + Iterates over all Unicode cmap tables and returns a dictionary mapping + glyphs to sets of codepoints, such as:: + + { + 'one': {0x31} + 'A': {0x41,0x391} + } + + The values are sets of Unicode codepoints because + some fonts map different codepoints to the same glyph. + For example, ``U+0041 LATIN CAPITAL LETTER A`` and ``U+0391 + GREEK CAPITAL LETTER ALPHA`` are sometimes the same glyph. + """ + result = {} + for subtable in self.tables: + if subtable.isUnicode(): + for codepoint, name in subtable.cmap.items(): + result.setdefault(name, set()).add(codepoint) + return result + + def decompile(self, data, ttFont): + tableVersion, numSubTables = struct.unpack(">HH", data[:4]) + self.tableVersion = int(tableVersion) + self.tables = tables = [] + seenOffsets = {} + for i in range(numSubTables): + platformID, platEncID, offset = struct.unpack( + ">HHl", data[4 + i * 8 : 4 + (i + 1) * 8] + ) + platformID, platEncID = int(platformID), int(platEncID) + format, length = struct.unpack(">HH", data[offset : offset + 4]) + if format in [8, 10, 12, 13]: + format, reserved, length = struct.unpack( + ">HHL", data[offset : offset + 8] + ) + elif format in [14]: + format, length = struct.unpack(">HL", data[offset : offset + 6]) + + if not length: + log.error( + "cmap subtable is reported as having zero length: platformID %s, " + "platEncID %s, format %s offset %s. 
Skipping table.", + platformID, + platEncID, + format, + offset, + ) + continue + table = CmapSubtable.newSubtable(format) + table.platformID = platformID + table.platEncID = platEncID + # Note that by default we decompile only the subtable header info; + # any other data gets decompiled only when an attribute of the + # subtable is referenced. + table.decompileHeader(data[offset : offset + int(length)], ttFont) + if offset in seenOffsets: + table.data = None # Mark as decompiled + table.cmap = tables[seenOffsets[offset]].cmap + else: + seenOffsets[offset] = i + tables.append(table) + if ttFont.lazy is False: # Be lazy for None and True + self.ensureDecompiled() + + def ensureDecompiled(self, recurse=False): + # The recurse argument is unused, but part of the signature of + # ensureDecompiled across the library. + for st in self.tables: + st.ensureDecompiled() + + def compile(self, ttFont): + self.tables.sort() # sort according to the spec; see CmapSubtable.__lt__() + numSubTables = len(self.tables) + totalOffset = 4 + 8 * numSubTables + data = struct.pack(">HH", self.tableVersion, numSubTables) + tableData = b"" + seen = ( + {} + ) # Some tables are the same object reference. Don't compile them twice. 
+ done = ( + {} + ) # Some tables are different objects, but compile to the same data chunk + for table in self.tables: + offset = seen.get(id(table.cmap)) + if offset is None: + chunk = table.compile(ttFont) + offset = done.get(chunk) + if offset is None: + offset = seen[id(table.cmap)] = done[chunk] = totalOffset + len( + tableData + ) + tableData = tableData + chunk + data = data + struct.pack(">HHl", table.platformID, table.platEncID, offset) + return data + tableData + + def toXML(self, writer, ttFont): + writer.simpletag("tableVersion", version=self.tableVersion) + writer.newline() + for table in self.tables: + table.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name == "tableVersion": + self.tableVersion = safeEval(attrs["version"]) + return + if name[:12] != "cmap_format_": + return + if not hasattr(self, "tables"): + self.tables = [] + format = safeEval(name[12:]) + table = CmapSubtable.newSubtable(format) + table.platformID = safeEval(attrs["platformID"]) + table.platEncID = safeEval(attrs["platEncID"]) + table.fromXML(name, attrs, content, ttFont) + self.tables.append(table) + + +class CmapSubtable(object): + """Base class for all cmap subtable formats. + + Subclasses which handle the individual subtable formats are named + ``cmap_format_0``, ``cmap_format_2`` etc. Use :py:meth:`getSubtableClass` + to retrieve the concrete subclass, or :py:meth:`newSubtable` to get a + new subtable object for a given format. + + The object exposes a ``.cmap`` attribute, which contains a dictionary mapping + character codepoints to glyph names. 
+ """ + + @staticmethod + def getSubtableClass(format): + """Return the subtable class for a format.""" + return cmap_classes.get(format, cmap_format_unknown) + + @staticmethod + def newSubtable(format): + """Return a new instance of a subtable for the given format + .""" + subtableClass = CmapSubtable.getSubtableClass(format) + return subtableClass(format) + + def __init__(self, format): + self.format = format + self.data = None + self.ttFont = None + self.platformID = None #: The platform ID of this subtable + self.platEncID = None #: The encoding ID of this subtable (interpretation depends on ``platformID``) + self.language = ( + None #: The language ID of this subtable (Macintosh platform only) + ) + + def ensureDecompiled(self, recurse=False): + # The recurse argument is unused, but part of the signature of + # ensureDecompiled across the library. + if self.data is None: + return + self.decompile(None, None) # use saved data. + self.data = None # Once this table has been decompiled, make sure we don't + # just return the original data. Also avoids recursion when + # called with an attribute that the cmap subtable doesn't have. + + def __getattr__(self, attr): + # allow lazy decompilation of subtables. 
+ if attr[:2] == "__": # don't handle requests for member functions like '__lt__' + raise AttributeError(attr) + if self.data is None: + raise AttributeError(attr) + self.ensureDecompiled() + return getattr(self, attr) + + def decompileHeader(self, data, ttFont): + format, length, language = struct.unpack(">HHH", data[:6]) + assert ( + len(data) == length + ), "corrupt cmap table format %d (data length: %d, header length: %d)" % ( + format, + len(data), + length, + ) + self.format = int(format) + self.length = int(length) + self.language = int(language) + self.data = data[6:] + self.ttFont = ttFont + + def toXML(self, writer, ttFont): + writer.begintag( + self.__class__.__name__, + [ + ("platformID", self.platformID), + ("platEncID", self.platEncID), + ("language", self.language), + ], + ) + writer.newline() + codes = sorted(self.cmap.items()) + self._writeCodes(codes, writer) + writer.endtag(self.__class__.__name__) + writer.newline() + + def getEncoding(self, default=None): + """Returns the Python encoding name for this cmap subtable based on its platformID, + platEncID, and language. If encoding for these values is not known, by default + ``None`` is returned. That can be overridden by passing a value to the ``default`` + argument. + + Note that if you want to choose a "preferred" cmap subtable, most of the time + ``self.isUnicode()`` is what you want as that one only returns true for the modern, + commonly used, Unicode-compatible triplets, not the legacy ones. 
+ """ + return getEncoding(self.platformID, self.platEncID, self.language, default) + + def isUnicode(self): + """Returns true if the characters are interpreted as Unicode codepoints.""" + return self.platformID == 0 or ( + self.platformID == 3 and self.platEncID in [0, 1, 10] + ) + + def isSymbol(self): + """Returns true if the subtable is for the Symbol encoding (3,0)""" + return self.platformID == 3 and self.platEncID == 0 + + def _writeCodes(self, codes, writer): + isUnicode = self.isUnicode() + for code, name in codes: + writer.simpletag("map", code=hex(code), name=name) + if isUnicode: + writer.comment(Unicode[code]) + writer.newline() + + def __lt__(self, other): + if not isinstance(other, CmapSubtable): + return NotImplemented + + # implemented so that list.sort() sorts according to the spec. + selfTuple = ( + getattr(self, "platformID", None), + getattr(self, "platEncID", None), + getattr(self, "language", None), + self.__dict__, + ) + otherTuple = ( + getattr(other, "platformID", None), + getattr(other, "platEncID", None), + getattr(other, "language", None), + other.__dict__, + ) + return selfTuple < otherTuple + + +class cmap_format_0(CmapSubtable): + def decompile(self, data, ttFont): + # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. + # If not, someone is calling the subtable decompile() directly, and must provide both args. 
+ if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert ( + data is None and ttFont is None + ), "Need both data and ttFont arguments" + data = ( + self.data + ) # decompileHeader assigns the data after the header to self.data + assert 262 == self.length, "Format 0 cmap subtable not 262 bytes" + gids = array.array("B") + gids.frombytes(self.data) + charCodes = list(range(len(gids))) + self.cmap = _make_map(self.ttFont, charCodes, gids) + + def compile(self, ttFont): + if self.data: + return struct.pack(">HHH", 0, 262, self.language) + self.data + + cmap = self.cmap + assert set(cmap.keys()).issubset(range(256)) + getGlyphID = ttFont.getGlyphID + valueList = [getGlyphID(cmap[i]) if i in cmap else 0 for i in range(256)] + + gids = array.array("B", valueList) + data = struct.pack(">HHH", 0, 262, self.language) + gids.tobytes() + assert len(data) == 262 + return data + + def fromXML(self, name, attrs, content, ttFont): + self.language = safeEval(attrs["language"]) + if not hasattr(self, "cmap"): + self.cmap = {} + cmap = self.cmap + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name != "map": + continue + cmap[safeEval(attrs["code"])] = attrs["name"] + + +subHeaderFormat = ">HHhH" + + +class SubHeader(object): + def __init__(self): + self.firstCode = None + self.entryCount = None + self.idDelta = None + self.idRangeOffset = None + self.glyphIndexArray = [] + + +class cmap_format_2(CmapSubtable): + def setIDDelta(self, subHeader): + subHeader.idDelta = 0 + # find the minGI which is not zero. + minGI = subHeader.glyphIndexArray[0] + for gid in subHeader.glyphIndexArray: + if (gid != 0) and (gid < minGI): + minGI = gid + # The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1. + # idDelta is a short, and must be between -32K and 32K. minGI can be between 1 and 64K. 
+ # We would like to pick an idDelta such that the first glyphArray GID is 1, + # so that we are more likely to be able to combine glypharray GID subranges. + # This means that we have a problem when minGI is > 32K + # Since the final gi is reconstructed from the glyphArray GID by: + # (short)finalGID = (gid + idDelta) % 0x10000), + # we can get from a glypharray GID of 1 to a final GID of 65K by subtracting 2, and casting the + # negative number to an unsigned short. + + if minGI > 1: + if minGI > 0x7FFF: + subHeader.idDelta = -(0x10000 - minGI) - 1 + else: + subHeader.idDelta = minGI - 1 + idDelta = subHeader.idDelta + for i in range(subHeader.entryCount): + gid = subHeader.glyphIndexArray[i] + if gid > 0: + subHeader.glyphIndexArray[i] = gid - idDelta + + def decompile(self, data, ttFont): + # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. + # If not, someone is calling the subtable decompile() directly, and must provide both args. + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert ( + data is None and ttFont is None + ), "Need both data and ttFont arguments" + + data = ( + self.data + ) # decompileHeader assigns the data after the header to self.data + subHeaderKeys = [] + maxSubHeaderindex = 0 + # get the key array, and determine the number of subHeaders. 
+ allKeys = array.array("H") + allKeys.frombytes(data[:512]) + data = data[512:] + if sys.byteorder != "big": + allKeys.byteswap() + subHeaderKeys = [key // 8 for key in allKeys] + maxSubHeaderindex = max(subHeaderKeys) + + # Load subHeaders + subHeaderList = [] + pos = 0 + for i in range(maxSubHeaderindex + 1): + subHeader = SubHeader() + ( + subHeader.firstCode, + subHeader.entryCount, + subHeader.idDelta, + subHeader.idRangeOffset, + ) = struct.unpack(subHeaderFormat, data[pos : pos + 8]) + pos += 8 + giDataPos = pos + subHeader.idRangeOffset - 2 + giList = array.array("H") + giList.frombytes(data[giDataPos : giDataPos + subHeader.entryCount * 2]) + if sys.byteorder != "big": + giList.byteswap() + subHeader.glyphIndexArray = giList + subHeaderList.append(subHeader) + # How this gets processed. + # Charcodes may be one or two bytes. + # The first byte of a charcode is mapped through the subHeaderKeys, to select + # a subHeader. For any subheader but 0, the next byte is then mapped through the + # selected subheader. If subheader Index 0 is selected, then the byte itself is + # mapped through the subheader, and there is no second byte. + # Then assume that the subsequent byte is the first byte of the next charcode,and repeat. + # + # Each subheader references a range in the glyphIndexArray whose length is entryCount. + # The range in glyphIndexArray referenced by a sunheader may overlap with the range in glyphIndexArray + # referenced by another subheader. + # The only subheader that will be referenced by more than one first-byte value is the subheader + # that maps the entire range of glyphID values to glyphIndex 0, e.g notdef: + # {firstChar 0, EntryCount 0,idDelta 0,idRangeOffset xx} + # A byte being mapped though a subheader is treated as in index into a mapping of array index to font glyphIndex. + # A subheader specifies a subrange within (0...256) by the + # firstChar and EntryCount values. 
If the byte value is outside the subrange, then the glyphIndex is zero + # (e.g. glyph not in font). + # If the byte index is in the subrange, then an offset index is calculated as (byteIndex - firstChar). + # The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by + # counting idRangeOffset bytes from the idRangeOffset word. The first value in this subrange is the + # glyphIndex for the index firstChar. The offset index should then be used in this array to get the glyphIndex. + # Example for Logocut-Medium + # first byte of charcode = 129; selects subheader 1. + # subheader 1 = {firstChar 64, EntryCount 108,idDelta 42,idRangeOffset 0252} + # second byte of charCode = 66 + # the index offset = 66-64 = 2. + # The subrange of the glyphIndexArray starting at 0x0252 bytes from the idRangeOffset word is: + # [glyphIndexArray index], [subrange array index] = glyphIndex + # [256], [0]=1 from charcode [129, 64] + # [257], [1]=2 from charcode [129, 65] + # [258], [2]=3 from charcode [129, 66] + # [259], [3]=4 from charcode [129, 67] + # So, the glyphIndex = 3 from the array. Then if idDelta is not zero and the glyph ID is not zero, + # add it to the glyphID to get the final glyphIndex + # value. In this case the final glyph index = 3+ 42 -> 45 for the final glyphIndex. Whew! + + self.data = b"" + cmap = {} + notdefGI = 0 + for firstByte in range(256): + subHeadindex = subHeaderKeys[firstByte] + subHeader = subHeaderList[subHeadindex] + if subHeadindex == 0: + if (firstByte < subHeader.firstCode) or ( + firstByte >= subHeader.firstCode + subHeader.entryCount + ): + continue # gi is notdef. + else: + charCode = firstByte + offsetIndex = firstByte - subHeader.firstCode + gi = subHeader.glyphIndexArray[offsetIndex] + if gi != 0: + gi = (gi + subHeader.idDelta) % 0x10000 + else: + continue # gi is notdef. 
+ cmap[charCode] = gi + else: + if subHeader.entryCount: + charCodeOffset = firstByte * 256 + subHeader.firstCode + for offsetIndex in range(subHeader.entryCount): + charCode = charCodeOffset + offsetIndex + gi = subHeader.glyphIndexArray[offsetIndex] + if gi != 0: + gi = (gi + subHeader.idDelta) % 0x10000 + else: + continue + cmap[charCode] = gi + # If not subHeader.entryCount, then all char codes with this first byte are + # mapped to .notdef. We can skip this subtable, and leave the glyphs un-encoded, which is the + # same as mapping it to .notdef. + + gids = list(cmap.values()) + charCodes = list(cmap.keys()) + self.cmap = _make_map(self.ttFont, charCodes, gids) + + def compile(self, ttFont): + if self.data: + return ( + struct.pack(">HHH", self.format, self.length, self.language) + self.data + ) + kEmptyTwoCharCodeRange = -1 + notdefGI = 0 + + items = sorted(self.cmap.items()) + charCodes = [item[0] for item in items] + names = [item[1] for item in items] + nameMap = ttFont.getReverseGlyphMap() + try: + gids = [nameMap[name] for name in names] + except KeyError: + nameMap = ttFont.getReverseGlyphMap(rebuild=True) + try: + gids = [nameMap[name] for name in names] + except KeyError: + # allow virtual GIDs in format 2 tables + gids = [] + for name in names: + try: + gid = nameMap[name] + except KeyError: + try: + if name[:3] == "gid": + gid = int(name[3:]) + else: + gid = ttFont.getGlyphID(name) + except: + raise KeyError(name) + + gids.append(gid) + + # Process the (char code to gid) item list in char code order. + # By definition, all one byte char codes map to subheader 0. + # For all the two byte char codes, we assume that the first byte maps maps to the empty subhead (with an entry count of 0, + # which defines all char codes in its range to map to notdef) unless proven otherwise. + # Note that since the char code items are processed in char code order, all the char codes with the + # same first byte are in sequential order. 
+ + subHeaderKeys = [ + kEmptyTwoCharCodeRange for x in range(256) + ] # list of indices into subHeaderList. + subHeaderList = [] + + # We force this subheader entry 0 to exist in the subHeaderList in the case where some one comes up + # with a cmap where all the one byte char codes map to notdef, + # with the result that the subhead 0 would not get created just by processing the item list. + charCode = charCodes[0] + if charCode > 255: + subHeader = SubHeader() + subHeader.firstCode = 0 + subHeader.entryCount = 0 + subHeader.idDelta = 0 + subHeader.idRangeOffset = 0 + subHeaderList.append(subHeader) + + lastFirstByte = -1 + items = zip(charCodes, gids) + for charCode, gid in items: + if gid == 0: + continue + firstbyte = charCode >> 8 + secondByte = charCode & 0x00FF + + if ( + firstbyte != lastFirstByte + ): # Need to update the current subhead, and start a new one. + if lastFirstByte > -1: + # fix GI's and iDelta of current subheader. + self.setIDDelta(subHeader) + + # If it was sunheader 0 for one-byte charCodes, then we need to set the subHeaderKeys value to zero + # for the indices matching the char codes. + if lastFirstByte == 0: + for index in range(subHeader.entryCount): + charCode = subHeader.firstCode + index + subHeaderKeys[charCode] = 0 + + assert subHeader.entryCount == len( + subHeader.glyphIndexArray + ), "Error - subhead entry count does not match len of glyphID subrange." + # init new subheader + subHeader = SubHeader() + subHeader.firstCode = secondByte + subHeader.entryCount = 1 + subHeader.glyphIndexArray.append(gid) + subHeaderList.append(subHeader) + subHeaderKeys[firstbyte] = len(subHeaderList) - 1 + lastFirstByte = firstbyte + else: + # need to fill in with notdefs all the code points between the last charCode and the current charCode. 
+ codeDiff = secondByte - (subHeader.firstCode + subHeader.entryCount) + for i in range(codeDiff): + subHeader.glyphIndexArray.append(notdefGI) + subHeader.glyphIndexArray.append(gid) + subHeader.entryCount = subHeader.entryCount + codeDiff + 1 + + # fix GI's and iDelta of last subheader that we we added to the subheader array. + self.setIDDelta(subHeader) + + # Now we add a final subheader for the subHeaderKeys which maps to empty two byte charcode ranges. + subHeader = SubHeader() + subHeader.firstCode = 0 + subHeader.entryCount = 0 + subHeader.idDelta = 0 + subHeader.idRangeOffset = 2 + subHeaderList.append(subHeader) + emptySubheadIndex = len(subHeaderList) - 1 + for index in range(256): + if subHeaderKeys[index] == kEmptyTwoCharCodeRange: + subHeaderKeys[index] = emptySubheadIndex + # Since this is the last subheader, the GlyphIndex Array starts two bytes after the start of the + # idRangeOffset word of this subHeader. We can safely point to the first entry in the GlyphIndexArray, + # since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with + # charcode 0 and GID 0. + + idRangeOffset = ( + len(subHeaderList) - 1 + ) * 8 + 2 # offset to beginning of glyphIDArray from first subheader idRangeOffset. + subheadRangeLen = ( + len(subHeaderList) - 1 + ) # skip last special empty-set subheader; we've already hardocodes its idRangeOffset to 2. + for index in range(subheadRangeLen): + subHeader = subHeaderList[index] + subHeader.idRangeOffset = 0 + for j in range(index): + prevSubhead = subHeaderList[j] + if ( + prevSubhead.glyphIndexArray == subHeader.glyphIndexArray + ): # use the glyphIndexArray subarray + subHeader.idRangeOffset = ( + prevSubhead.idRangeOffset - (index - j) * 8 + ) + subHeader.glyphIndexArray = [] + break + if subHeader.idRangeOffset == 0: # didn't find one. + subHeader.idRangeOffset = idRangeOffset + idRangeOffset = ( + idRangeOffset - 8 + ) + subHeader.entryCount * 2 # one less subheader, one more subArray. 
+ else: + idRangeOffset = idRangeOffset - 8 # one less subheader + + # Now we can write out the data! + length = ( + 6 + 512 + 8 * len(subHeaderList) + ) # header, 256 subHeaderKeys, and subheader array. + for subhead in subHeaderList[:-1]: + length = ( + length + len(subhead.glyphIndexArray) * 2 + ) # We can't use subhead.entryCount, as some of the subhead may share subArrays. + dataList = [struct.pack(">HHH", 2, length, self.language)] + for index in subHeaderKeys: + dataList.append(struct.pack(">H", index * 8)) + for subhead in subHeaderList: + dataList.append( + struct.pack( + subHeaderFormat, + subhead.firstCode, + subhead.entryCount, + subhead.idDelta, + subhead.idRangeOffset, + ) + ) + for subhead in subHeaderList[:-1]: + for gi in subhead.glyphIndexArray: + dataList.append(struct.pack(">H", gi)) + data = bytesjoin(dataList) + assert len(data) == length, ( + "Error: cmap format 2 is not same length as calculated! actual: " + + str(len(data)) + + " calc : " + + str(length) + ) + return data + + def fromXML(self, name, attrs, content, ttFont): + self.language = safeEval(attrs["language"]) + if not hasattr(self, "cmap"): + self.cmap = {} + cmap = self.cmap + + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name != "map": + continue + cmap[safeEval(attrs["code"])] = attrs["name"] + + +cmap_format_4_format = ">7H" + +# uint16 endCode[segCount] # Ending character code for each segment, last = 0xFFFF. 
+# uint16 reservedPad # This value should be zero +# uint16 startCode[segCount] # Starting character code for each segment +# uint16 idDelta[segCount] # Delta for all character codes in segment +# uint16 idRangeOffset[segCount] # Offset in bytes to glyph indexArray, or 0 +# uint16 glyphIndexArray[variable] # Glyph index array + + +def splitRange(startCode, endCode, cmap): + # Try to split a range of character codes into subranges with consecutive + # glyph IDs in such a way that the cmap4 subtable can be stored "most" + # efficiently. I can't prove I've got the optimal solution, but it seems + # to do well with the fonts I tested: none became bigger, many became smaller. + if startCode == endCode: + return [], [endCode] + + lastID = cmap[startCode] + lastCode = startCode + inOrder = None + orderedBegin = None + subRanges = [] + + # Gather subranges in which the glyph IDs are consecutive. + for code in range(startCode + 1, endCode + 1): + glyphID = cmap[code] + + if glyphID - 1 == lastID: + if inOrder is None or not inOrder: + inOrder = 1 + orderedBegin = lastCode + else: + if inOrder: + inOrder = 0 + subRanges.append((orderedBegin, lastCode)) + orderedBegin = None + + lastID = glyphID + lastCode = code + + if inOrder: + subRanges.append((orderedBegin, lastCode)) + assert lastCode == endCode + + # Now filter out those new subranges that would only make the data bigger. + # A new segment cost 8 bytes, not using a new segment costs 2 bytes per + # character. 
+ newRanges = [] + for b, e in subRanges: + if b == startCode and e == endCode: + break # the whole range, we're fine + if b == startCode or e == endCode: + threshold = 4 # split costs one more segment + else: + threshold = 8 # split costs two more segments + if (e - b + 1) > threshold: + newRanges.append((b, e)) + subRanges = newRanges + + if not subRanges: + return [], [endCode] + + if subRanges[0][0] != startCode: + subRanges.insert(0, (startCode, subRanges[0][0] - 1)) + if subRanges[-1][1] != endCode: + subRanges.append((subRanges[-1][1] + 1, endCode)) + + # Fill the "holes" in the segments list -- those are the segments in which + # the glyph IDs are _not_ consecutive. + i = 1 + while i < len(subRanges): + if subRanges[i - 1][1] + 1 != subRanges[i][0]: + subRanges.insert(i, (subRanges[i - 1][1] + 1, subRanges[i][0] - 1)) + i = i + 1 + i = i + 1 + + # Transform the ranges into startCode/endCode lists. + start = [] + end = [] + for b, e in subRanges: + start.append(b) + end.append(e) + start.pop(0) + + assert len(start) + 1 == len(end) + return start, end + + +class cmap_format_4(CmapSubtable): + def decompile(self, data, ttFont): + # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. + # If not, someone is calling the subtable decompile() directly, and must provide both args. 
+ if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert ( + data is None and ttFont is None + ), "Need both data and ttFont arguments" + + data = ( + self.data + ) # decompileHeader assigns the data after the header to self.data + (segCountX2, searchRange, entrySelector, rangeShift) = struct.unpack( + ">4H", data[:8] + ) + data = data[8:] + segCount = segCountX2 // 2 + + allCodes = array.array("H") + allCodes.frombytes(data) + self.data = data = None + + if sys.byteorder != "big": + allCodes.byteswap() + + # divide the data + endCode = allCodes[:segCount] + allCodes = allCodes[segCount + 1 :] # the +1 is skipping the reservedPad field + startCode = allCodes[:segCount] + allCodes = allCodes[segCount:] + idDelta = allCodes[:segCount] + allCodes = allCodes[segCount:] + idRangeOffset = allCodes[:segCount] + glyphIndexArray = allCodes[segCount:] + lenGIArray = len(glyphIndexArray) + + # build 2-byte character mapping + charCodes = [] + gids = [] + for i in range(len(startCode) - 1): # don't do 0xffff! + start = startCode[i] + delta = idDelta[i] + rangeOffset = idRangeOffset[i] + partial = rangeOffset // 2 - start + i - len(idRangeOffset) + + rangeCharCodes = list(range(startCode[i], endCode[i] + 1)) + charCodes.extend(rangeCharCodes) + if rangeOffset == 0: + gids.extend( + [(charCode + delta) & 0xFFFF for charCode in rangeCharCodes] + ) + else: + for charCode in rangeCharCodes: + index = charCode + partial + assert index < lenGIArray, ( + "In format 4 cmap, range (%d), the calculated index (%d) into the glyph index array is not less than the length of the array (%d) !" 
+ % (i, index, lenGIArray) + ) + if glyphIndexArray[index] != 0: # if not missing glyph + glyphID = glyphIndexArray[index] + delta + else: + glyphID = 0 # missing glyph + gids.append(glyphID & 0xFFFF) + + self.cmap = _make_map(self.ttFont, charCodes, gids) + + def compile(self, ttFont): + if self.data: + return ( + struct.pack(">HHH", self.format, self.length, self.language) + self.data + ) + + charCodes = list(self.cmap.keys()) + if not charCodes: + startCode = [0xFFFF] + endCode = [0xFFFF] + else: + charCodes.sort() + names = [self.cmap[code] for code in charCodes] + nameMap = ttFont.getReverseGlyphMap() + try: + gids = [nameMap[name] for name in names] + except KeyError: + nameMap = ttFont.getReverseGlyphMap(rebuild=True) + try: + gids = [nameMap[name] for name in names] + except KeyError: + # allow virtual GIDs in format 4 tables + gids = [] + for name in names: + try: + gid = nameMap[name] + except KeyError: + try: + if name[:3] == "gid": + gid = int(name[3:]) + else: + gid = ttFont.getGlyphID(name) + except: + raise KeyError(name) + + gids.append(gid) + cmap = {} # code:glyphID mapping + for code, gid in zip(charCodes, gids): + cmap[code] = gid + + # Build startCode and endCode lists. + # Split the char codes in ranges of consecutive char codes, then split + # each range in more ranges of consecutive/not consecutive glyph IDs. + # See splitRange(). 
+ lastCode = charCodes[0] + endCode = [] + startCode = [lastCode] + for charCode in charCodes[ + 1: + ]: # skip the first code, it's the first start code + if charCode == lastCode + 1: + lastCode = charCode + continue + start, end = splitRange(startCode[-1], lastCode, cmap) + startCode.extend(start) + endCode.extend(end) + startCode.append(charCode) + lastCode = charCode + start, end = splitRange(startCode[-1], lastCode, cmap) + startCode.extend(start) + endCode.extend(end) + startCode.append(0xFFFF) + endCode.append(0xFFFF) + + # build up rest of cruft + idDelta = [] + idRangeOffset = [] + glyphIndexArray = [] + for i in range(len(endCode) - 1): # skip the closing codes (0xffff) + indices = [] + for charCode in range(startCode[i], endCode[i] + 1): + indices.append(cmap[charCode]) + if indices == list(range(indices[0], indices[0] + len(indices))): + idDelta.append((indices[0] - startCode[i]) % 0x10000) + idRangeOffset.append(0) + else: + idDelta.append(0) + idRangeOffset.append(2 * (len(endCode) + len(glyphIndexArray) - i)) + glyphIndexArray.extend(indices) + idDelta.append(1) # 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef + idRangeOffset.append(0) + + # Insane. 
+ segCount = len(endCode) + segCountX2 = segCount * 2 + searchRange, entrySelector, rangeShift = getSearchRange(segCount, 2) + + charCodeArray = array.array("H", endCode + [0] + startCode) + idDeltaArray = array.array("H", idDelta) + restArray = array.array("H", idRangeOffset + glyphIndexArray) + if sys.byteorder != "big": + charCodeArray.byteswap() + if sys.byteorder != "big": + idDeltaArray.byteswap() + if sys.byteorder != "big": + restArray.byteswap() + data = charCodeArray.tobytes() + idDeltaArray.tobytes() + restArray.tobytes() + + length = struct.calcsize(cmap_format_4_format) + len(data) + header = struct.pack( + cmap_format_4_format, + self.format, + length, + self.language, + segCountX2, + searchRange, + entrySelector, + rangeShift, + ) + return header + data + + def fromXML(self, name, attrs, content, ttFont): + self.language = safeEval(attrs["language"]) + if not hasattr(self, "cmap"): + self.cmap = {} + cmap = self.cmap + + for element in content: + if not isinstance(element, tuple): + continue + nameMap, attrsMap, dummyContent = element + if nameMap != "map": + assert 0, "Unrecognized keyword in cmap subtable" + cmap[safeEval(attrsMap["code"])] = attrsMap["name"] + + +class cmap_format_6(CmapSubtable): + def decompile(self, data, ttFont): + # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. + # If not, someone is calling the subtable decompile() directly, and must provide both args. + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert ( + data is None and ttFont is None + ), "Need both data and ttFont arguments" + + data = ( + self.data + ) # decompileHeader assigns the data after the header to self.data + firstCode, entryCount = struct.unpack(">HH", data[:4]) + firstCode = int(firstCode) + data = data[4:] + # assert len(data) == 2 * entryCount # XXX not true in Apple's Helvetica!!! 
+ gids = array.array("H") + gids.frombytes(data[: 2 * int(entryCount)]) + if sys.byteorder != "big": + gids.byteswap() + self.data = data = None + + charCodes = list(range(firstCode, firstCode + len(gids))) + self.cmap = _make_map(self.ttFont, charCodes, gids) + + def compile(self, ttFont): + if self.data: + return ( + struct.pack(">HHH", self.format, self.length, self.language) + self.data + ) + cmap = self.cmap + codes = sorted(cmap.keys()) + if codes: # yes, there are empty cmap tables. + codes = list(range(codes[0], codes[-1] + 1)) + firstCode = codes[0] + valueList = [ + ttFont.getGlyphID(cmap[code]) if code in cmap else 0 for code in codes + ] + gids = array.array("H", valueList) + if sys.byteorder != "big": + gids.byteswap() + data = gids.tobytes() + else: + data = b"" + firstCode = 0 + header = struct.pack( + ">HHHHH", 6, len(data) + 10, self.language, firstCode, len(codes) + ) + return header + data + + def fromXML(self, name, attrs, content, ttFont): + self.language = safeEval(attrs["language"]) + if not hasattr(self, "cmap"): + self.cmap = {} + cmap = self.cmap + + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name != "map": + continue + cmap[safeEval(attrs["code"])] = attrs["name"] + + +class cmap_format_12_or_13(CmapSubtable): + def __init__(self, format): + self.format = format + self.reserved = 0 + self.data = None + self.ttFont = None + + def decompileHeader(self, data, ttFont): + format, reserved, length, language, nGroups = struct.unpack(">HHLLL", data[:16]) + assert ( + len(data) == (16 + nGroups * 12) == (length) + ), "corrupt cmap table format %d (data length: %d, header length: %d)" % ( + self.format, + len(data), + length, + ) + self.format = format + self.reserved = reserved + self.length = length + self.language = language + self.nGroups = nGroups + self.data = data[16:] + self.ttFont = ttFont + + def decompile(self, data, ttFont): + # we usually get here indirectly from the 
subtable __getattr__ function, in which case both args must be None. + # If not, someone is calling the subtable decompile() directly, and must provide both args. + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert ( + data is None and ttFont is None + ), "Need both data and ttFont arguments" + + data = ( + self.data + ) # decompileHeader assigns the data after the header to self.data + charCodes = [] + gids = [] + pos = 0 + for i in range(self.nGroups): + startCharCode, endCharCode, glyphID = struct.unpack( + ">LLL", data[pos : pos + 12] + ) + pos += 12 + lenGroup = 1 + endCharCode - startCharCode + charCodes.extend(list(range(startCharCode, endCharCode + 1))) + gids.extend(self._computeGIDs(glyphID, lenGroup)) + self.data = data = None + self.cmap = _make_map(self.ttFont, charCodes, gids) + + def compile(self, ttFont): + if self.data: + return ( + struct.pack( + ">HHLLL", + self.format, + self.reserved, + self.length, + self.language, + self.nGroups, + ) + + self.data + ) + charCodes = list(self.cmap.keys()) + names = list(self.cmap.values()) + nameMap = ttFont.getReverseGlyphMap() + try: + gids = [nameMap[name] for name in names] + except KeyError: + nameMap = ttFont.getReverseGlyphMap(rebuild=True) + try: + gids = [nameMap[name] for name in names] + except KeyError: + # allow virtual GIDs in format 12 tables + gids = [] + for name in names: + try: + gid = nameMap[name] + except KeyError: + try: + if name[:3] == "gid": + gid = int(name[3:]) + else: + gid = ttFont.getGlyphID(name) + except: + raise KeyError(name) + + gids.append(gid) + + cmap = {} # code:glyphID mapping + for code, gid in zip(charCodes, gids): + cmap[code] = gid + + charCodes.sort() + index = 0 + startCharCode = charCodes[0] + startGlyphID = cmap[startCharCode] + lastGlyphID = startGlyphID - self._format_step + lastCharCode = startCharCode - 1 + nGroups = 0 + dataList = [] + maxIndex = len(charCodes) + for index in range(maxIndex): + charCode = 
charCodes[index] + glyphID = cmap[charCode] + if not self._IsInSameRun(glyphID, lastGlyphID, charCode, lastCharCode): + dataList.append( + struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID) + ) + startCharCode = charCode + startGlyphID = glyphID + nGroups = nGroups + 1 + lastGlyphID = glyphID + lastCharCode = charCode + dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID)) + nGroups = nGroups + 1 + data = bytesjoin(dataList) + lengthSubtable = len(data) + 16 + assert len(data) == (nGroups * 12) == (lengthSubtable - 16) + return ( + struct.pack( + ">HHLLL", + self.format, + self.reserved, + lengthSubtable, + self.language, + nGroups, + ) + + data + ) + + def toXML(self, writer, ttFont): + writer.begintag( + self.__class__.__name__, + [ + ("platformID", self.platformID), + ("platEncID", self.platEncID), + ("format", self.format), + ("reserved", self.reserved), + ("length", self.length), + ("language", self.language), + ("nGroups", self.nGroups), + ], + ) + writer.newline() + codes = sorted(self.cmap.items()) + self._writeCodes(codes, writer) + writer.endtag(self.__class__.__name__) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.format = safeEval(attrs["format"]) + self.reserved = safeEval(attrs["reserved"]) + self.length = safeEval(attrs["length"]) + self.language = safeEval(attrs["language"]) + self.nGroups = safeEval(attrs["nGroups"]) + if not hasattr(self, "cmap"): + self.cmap = {} + cmap = self.cmap + + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name != "map": + continue + cmap[safeEval(attrs["code"])] = attrs["name"] + + +class cmap_format_12(cmap_format_12_or_13): + _format_step = 1 + + def __init__(self, format=12): + cmap_format_12_or_13.__init__(self, format) + + def _computeGIDs(self, startingGlyph, numberOfGlyphs): + return list(range(startingGlyph, startingGlyph + numberOfGlyphs)) + + def _IsInSameRun(self, glyphID, 
lastGlyphID, charCode, lastCharCode): + return (glyphID == 1 + lastGlyphID) and (charCode == 1 + lastCharCode) + + +class cmap_format_13(cmap_format_12_or_13): + _format_step = 0 + + def __init__(self, format=13): + cmap_format_12_or_13.__init__(self, format) + + def _computeGIDs(self, startingGlyph, numberOfGlyphs): + return [startingGlyph] * numberOfGlyphs + + def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode): + return (glyphID == lastGlyphID) and (charCode == 1 + lastCharCode) + + +def cvtToUVS(threeByteString): + data = b"\0" + threeByteString + (val,) = struct.unpack(">L", data) + return val + + +def cvtFromUVS(val): + assert 0 <= val < 0x1000000 + fourByteString = struct.pack(">L", val) + return fourByteString[1:] + + +class cmap_format_14(CmapSubtable): + def decompileHeader(self, data, ttFont): + format, length, numVarSelectorRecords = struct.unpack(">HLL", data[:10]) + self.data = data[10:] + self.length = length + self.numVarSelectorRecords = numVarSelectorRecords + self.ttFont = ttFont + self.language = 0xFF # has no language. + + def decompile(self, data, ttFont): + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert ( + data is None and ttFont is None + ), "Need both data and ttFont arguments" + data = self.data + + self.cmap = ( + {} + ) # so that clients that expect this to exist in a cmap table won't fail. 
+ uvsDict = {} + recOffset = 0 + for n in range(self.numVarSelectorRecords): + uvs, defOVSOffset, nonDefUVSOffset = struct.unpack( + ">3sLL", data[recOffset : recOffset + 11] + ) + recOffset += 11 + varUVS = cvtToUVS(uvs) + if defOVSOffset: + startOffset = defOVSOffset - 10 + (numValues,) = struct.unpack(">L", data[startOffset : startOffset + 4]) + startOffset += 4 + for r in range(numValues): + uv, addtlCnt = struct.unpack( + ">3sB", data[startOffset : startOffset + 4] + ) + startOffset += 4 + firstBaseUV = cvtToUVS(uv) + cnt = addtlCnt + 1 + baseUVList = list(range(firstBaseUV, firstBaseUV + cnt)) + glyphList = [None] * cnt + localUVList = zip(baseUVList, glyphList) + try: + uvsDict[varUVS].extend(localUVList) + except KeyError: + uvsDict[varUVS] = list(localUVList) + + if nonDefUVSOffset: + startOffset = nonDefUVSOffset - 10 + (numRecs,) = struct.unpack(">L", data[startOffset : startOffset + 4]) + startOffset += 4 + localUVList = [] + for r in range(numRecs): + uv, gid = struct.unpack(">3sH", data[startOffset : startOffset + 5]) + startOffset += 5 + uv = cvtToUVS(uv) + glyphName = self.ttFont.getGlyphName(gid) + localUVList.append((uv, glyphName)) + try: + uvsDict[varUVS].extend(localUVList) + except KeyError: + uvsDict[varUVS] = localUVList + + self.uvsDict = uvsDict + + def toXML(self, writer, ttFont): + writer.begintag( + self.__class__.__name__, + [ + ("platformID", self.platformID), + ("platEncID", self.platEncID), + ], + ) + writer.newline() + uvsDict = self.uvsDict + uvsList = sorted(uvsDict.keys()) + for uvs in uvsList: + uvList = uvsDict[uvs] + uvList.sort(key=lambda item: (item[1] is not None, item[0], item[1])) + for uv, gname in uvList: + attrs = [("uv", hex(uv)), ("uvs", hex(uvs))] + if gname is not None: + attrs.append(("name", gname)) + writer.simpletag("map", attrs) + writer.newline() + writer.endtag(self.__class__.__name__) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.language = 0xFF # provide a value so that 
CmapSubtable.__lt__() won't fail + if not hasattr(self, "cmap"): + self.cmap = ( + {} + ) # so that clients that expect this to exist in a cmap table won't fail. + if not hasattr(self, "uvsDict"): + self.uvsDict = {} + uvsDict = self.uvsDict + + # For backwards compatibility reasons we accept "None" as an indicator + # for "default mapping", unless the font actually has a glyph named + # "None". + _hasGlyphNamedNone = None + + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name != "map": + continue + uvs = safeEval(attrs["uvs"]) + uv = safeEval(attrs["uv"]) + gname = attrs.get("name") + if gname == "None": + if _hasGlyphNamedNone is None: + _hasGlyphNamedNone = "None" in ttFont.getGlyphOrder() + if not _hasGlyphNamedNone: + gname = None + try: + uvsDict[uvs].append((uv, gname)) + except KeyError: + uvsDict[uvs] = [(uv, gname)] + + def compile(self, ttFont): + if self.data: + return ( + struct.pack( + ">HLL", self.format, self.length, self.numVarSelectorRecords + ) + + self.data + ) + + uvsDict = self.uvsDict + uvsList = sorted(uvsDict.keys()) + self.numVarSelectorRecords = len(uvsList) + offset = ( + 10 + self.numVarSelectorRecords * 11 + ) # current value is end of VarSelectorRecords block. 
+ data = [] + varSelectorRecords = [] + for uvs in uvsList: + entryList = uvsDict[uvs] + + defList = [entry for entry in entryList if entry[1] is None] + if defList: + defList = [entry[0] for entry in defList] + defOVSOffset = offset + defList.sort() + + lastUV = defList[0] + cnt = -1 + defRecs = [] + for defEntry in defList: + cnt += 1 + if (lastUV + cnt) != defEntry: + rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt - 1) + lastUV = defEntry + defRecs.append(rec) + cnt = 0 + + rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt) + defRecs.append(rec) + + numDefRecs = len(defRecs) + data.append(struct.pack(">L", numDefRecs)) + data.extend(defRecs) + offset += 4 + numDefRecs * 4 + else: + defOVSOffset = 0 + + ndefList = [entry for entry in entryList if entry[1] is not None] + if ndefList: + nonDefUVSOffset = offset + ndefList.sort() + numNonDefRecs = len(ndefList) + data.append(struct.pack(">L", numNonDefRecs)) + offset += 4 + numNonDefRecs * 5 + + for uv, gname in ndefList: + gid = ttFont.getGlyphID(gname) + ndrec = struct.pack(">3sH", cvtFromUVS(uv), gid) + data.append(ndrec) + else: + nonDefUVSOffset = 0 + + vrec = struct.pack(">3sLL", cvtFromUVS(uvs), defOVSOffset, nonDefUVSOffset) + varSelectorRecords.append(vrec) + + data = bytesjoin(varSelectorRecords) + bytesjoin(data) + self.length = 10 + len(data) + headerdata = struct.pack( + ">HLL", self.format, self.length, self.numVarSelectorRecords + ) + + return headerdata + data + + +class cmap_format_unknown(CmapSubtable): + def toXML(self, writer, ttFont): + cmapName = self.__class__.__name__[:12] + str(self.format) + writer.begintag( + cmapName, + [ + ("platformID", self.platformID), + ("platEncID", self.platEncID), + ], + ) + writer.newline() + writer.dumphex(self.data) + writer.endtag(cmapName) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.data = readHex(content) + self.cmap = {} + + def decompileHeader(self, data, ttFont): + self.language = 0 # dummy value + self.data = data + 
+ def decompile(self, data, ttFont): + # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None. + # If not, someone is calling the subtable decompile() directly, and must provide both args. + if data is not None and ttFont is not None: + self.decompileHeader(data, ttFont) + else: + assert ( + data is None and ttFont is None + ), "Need both data and ttFont arguments" + + def compile(self, ttFont): + if self.data: + return self.data + else: + return None + + +cmap_classes = { + 0: cmap_format_0, + 2: cmap_format_2, + 4: cmap_format_4, + 6: cmap_format_6, + 12: cmap_format_12, + 13: cmap_format_13, + 14: cmap_format_14, +} diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_c_v_a_r.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_c_v_a_r.py new file mode 100644 index 0000000000000000000000000000000000000000..6ea44dbab3b0a4b0da1e5327d077873867f0b520 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_c_v_a_r.py @@ -0,0 +1,86 @@ +from . 
import DefaultTable +from fontTools.misc import sstruct +from fontTools.misc.textTools import bytesjoin +from fontTools.ttLib.tables.TupleVariation import ( + compileTupleVariationStore, + decompileTupleVariationStore, + TupleVariation, +) + + +# https://www.microsoft.com/typography/otspec/cvar.htm +# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6cvar.html + +CVAR_HEADER_FORMAT = """ + > # big endian + majorVersion: H + minorVersion: H + tupleVariationCount: H + offsetToData: H +""" + +CVAR_HEADER_SIZE = sstruct.calcsize(CVAR_HEADER_FORMAT) + + +class table__c_v_a_r(DefaultTable.DefaultTable): + dependencies = ["cvt ", "fvar"] + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.majorVersion, self.minorVersion = 1, 0 + self.variations = [] + + def compile(self, ttFont, useSharedPoints=False): + tupleVariationCount, tuples, data = compileTupleVariationStore( + variations=[v for v in self.variations if v.hasImpact()], + pointCount=len(ttFont["cvt "].values), + axisTags=[axis.axisTag for axis in ttFont["fvar"].axes], + sharedTupleIndices={}, + useSharedPoints=useSharedPoints, + ) + header = { + "majorVersion": self.majorVersion, + "minorVersion": self.minorVersion, + "tupleVariationCount": tupleVariationCount, + "offsetToData": CVAR_HEADER_SIZE + len(tuples), + } + return b"".join([sstruct.pack(CVAR_HEADER_FORMAT, header), tuples, data]) + + def decompile(self, data, ttFont): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + header = {} + sstruct.unpack(CVAR_HEADER_FORMAT, data[0:CVAR_HEADER_SIZE], header) + self.majorVersion = header["majorVersion"] + self.minorVersion = header["minorVersion"] + assert self.majorVersion == 1, self.majorVersion + self.variations = decompileTupleVariationStore( + tableTag=self.tableTag, + axisTags=axisTags, + tupleVariationCount=header["tupleVariationCount"], + pointCount=len(ttFont["cvt 
"].values), + sharedTuples=None, + data=data, + pos=CVAR_HEADER_SIZE, + dataPos=header["offsetToData"], + ) + + def fromXML(self, name, attrs, content, ttFont): + if name == "version": + self.majorVersion = int(attrs.get("major", "1")) + self.minorVersion = int(attrs.get("minor", "0")) + elif name == "tuple": + valueCount = len(ttFont["cvt "].values) + var = TupleVariation({}, [None] * valueCount) + self.variations.append(var) + for tupleElement in content: + if isinstance(tupleElement, tuple): + tupleName, tupleAttrs, tupleContent = tupleElement + var.fromXML(tupleName, tupleAttrs, tupleContent) + + def toXML(self, writer, ttFont): + axisTags = [axis.axisTag for axis in ttFont["fvar"].axes] + writer.simpletag("version", major=self.majorVersion, minor=self.minorVersion) + writer.newline() + for var in self.variations: + var.toXML(writer, axisTags) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_g_c_i_d.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_g_c_i_d.py new file mode 100644 index 0000000000000000000000000000000000000000..2e746c846fa14800cb7de93969984dac36678e4e --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_g_c_i_d.py @@ -0,0 +1,6 @@ +from .otBase import BaseTTXConverter + + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gcid.html +class table__g_c_i_d(BaseTTXConverter): + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_g_v_a_r.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_g_v_a_r.py new file mode 100644 index 0000000000000000000000000000000000000000..044f65f716ec7c6c2b576e22b93fb8a610e62bd5 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_g_v_a_r.py @@ -0,0 +1,277 @@ +from collections import deque +from functools import partial +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from 
from fontTools.misc.lazyTools import LazyDict
from . import DefaultTable
import array
import itertools
import logging
import struct
import sys
import fontTools.ttLib.tables.TupleVariation as tv

# NOTE(review): sstruct, safeEval and deque are used below but their imports
# are not visible in this part of the file; re-imported here so the module
# stands on its own (duplicate imports are harmless). Confirm against the
# top of the file.
from collections import deque
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval


log = logging.getLogger(__name__)
TupleVariation = tv.TupleVariation


# https://www.microsoft.com/typography/otspec/gvar.htm
# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm
#
# Apple's documentation of 'gvar':
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html
#
# FreeType2 source code for parsing 'gvar':
# http://git.savannah.gnu.org/cgit/freetype/freetype2.git/tree/src/truetype/ttgxvar.c

GVAR_HEADER_FORMAT = """
    > # big endian
    version: H
    reserved: H
    axisCount: H
    sharedTupleCount: H
    offsetToSharedTuples: I
    glyphCount: H
    flags: H
    offsetToGlyphVariationData: I
"""

GVAR_HEADER_SIZE = sstruct.calcsize(GVAR_HEADER_FORMAT)


class table__g_v_a_r(DefaultTable.DefaultTable):
    """TrueType 'gvar' (glyph variations) table.

    ``self.variations`` maps glyph names to lists of TupleVariation
    objects.  On decompile the mapping is a LazyDict so a glyph's
    variation data is only parsed when it is first accessed.
    """

    dependencies = ["fvar", "glyf"]

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.version, self.reserved = 1, 0
        self.variations = {}

    def compile(self, ttFont):
        """Serialize the table: header, per-glyph offset array, shared
        tuples, then the per-glyph variation data."""
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        sharedTuples = tv.compileSharedTuples(
            axisTags, itertools.chain(*self.variations.values())
        )
        sharedTupleIndices = {coord: i for i, coord in enumerate(sharedTuples)}
        sharedTupleSize = sum(len(c) for c in sharedTuples)
        compiledGlyphs = self.compileGlyphs_(ttFont, axisTags, sharedTupleIndices)
        # Running offsets into the glyph-variation-data block: one per glyph
        # plus a final end offset.
        offset = 0
        offsets = []
        for glyph in compiledGlyphs:
            offsets.append(offset)
            offset += len(glyph)
        offsets.append(offset)
        compiledOffsets, tableFormat = self.compileOffsets_(offsets)

        header = {}
        header["version"] = self.version
        header["reserved"] = self.reserved
        header["axisCount"] = len(axisTags)
        header["sharedTupleCount"] = len(sharedTuples)
        header["offsetToSharedTuples"] = GVAR_HEADER_SIZE + len(compiledOffsets)
        header["glyphCount"] = len(compiledGlyphs)
        header["flags"] = tableFormat
        header["offsetToGlyphVariationData"] = (
            header["offsetToSharedTuples"] + sharedTupleSize
        )
        compiledHeader = sstruct.pack(GVAR_HEADER_FORMAT, header)

        result = [compiledHeader, compiledOffsets]
        result.extend(sharedTuples)
        result.extend(compiledGlyphs)
        return b"".join(result)

    def compileGlyphs_(self, ttFont, axisTags, sharedCoordIndices):
        """Compile every glyph's variation data, in glyph order.

        Glyphs without variations compile to an empty byte string.
        """
        result = []
        for glyphName in ttFont.getGlyphOrder():
            variations = self.variations.get(glyphName, [])
            if not variations:
                result.append(b"")
                continue
            pointCountUnused = 0  # pointCount is actually unused by compileGlyph
            result.append(
                compileGlyph_(
                    variations, pointCountUnused, axisTags, sharedCoordIndices
                )
            )
        return result

    def decompile(self, data, ttFont):
        """Parse the binary table.

        Per-glyph variation data is decoded lazily through a LazyDict;
        when the font was opened with ``lazy=False`` everything is decoded
        immediately via ensureDecompiled().
        """
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        glyphs = ttFont.getGlyphOrder()
        sstruct.unpack(GVAR_HEADER_FORMAT, data[0:GVAR_HEADER_SIZE], self)
        assert len(glyphs) == self.glyphCount
        assert len(axisTags) == self.axisCount
        sharedCoords = tv.decompileSharedTuples(
            axisTags, self.sharedTupleCount, data, self.offsetToSharedTuples
        )
        offsetToData = self.offsetToGlyphVariationData
        glyf = ttFont["glyf"]

        def get_read_item():
            # Captured once so every lazy item shares the same reverse map
            # and table format.
            reverseGlyphMap = ttFont.getReverseGlyphMap()
            tableFormat = self.flags & 1

            def read_item(glyphName):
                gid = reverseGlyphMap[glyphName]
                offsetSize = 2 if tableFormat == 0 else 4
                startOffset = GVAR_HEADER_SIZE + offsetSize * gid
                endOffset = startOffset + offsetSize * 2
                offsets = table__g_v_a_r.decompileOffsets_(
                    data[startOffset:endOffset],
                    tableFormat=tableFormat,
                    glyphCount=1,
                )
                gvarData = data[offsetToData + offsets[0] : offsetToData + offsets[1]]
                if not gvarData:
                    return []
                glyph = glyf[glyphName]
                numPointsInGlyph = self.getNumPoints_(glyph)
                return decompileGlyph_(
                    numPointsInGlyph, sharedCoords, axisTags, gvarData
                )

            return read_item

        read_item = get_read_item()
        self.variations = LazyDict(
            {glyphs[gid]: read_item for gid in range(self.glyphCount)}
        )

        if ttFont.lazy is False:  # Be lazy for None and True
            self.ensureDecompiled()

    def ensureDecompiled(self, recurse=False):
        # The recurse argument is unused, but part of the signature of
        # ensureDecompiled across the library.
        # Use a zero-length deque to consume the lazy dict
        deque(self.variations.values(), maxlen=0)

    @staticmethod
    def decompileOffsets_(data, tableFormat, glyphCount):
        """Unpack glyphCount+1 offsets from a 'gvar' offset table."""
        if tableFormat == 0:
            # Short format: array of UInt16
            offsets = array.array("H")
            offsetsSize = (glyphCount + 1) * 2
        else:
            # Long format: array of UInt32
            offsets = array.array("I")
            offsetsSize = (glyphCount + 1) * 4
        offsets.frombytes(data[0:offsetsSize])
        if sys.byteorder != "big":
            offsets.byteswap()

        # In the short format, offsets need to be multiplied by 2.
        # This is not documented in Apple's TrueType specification,
        # but can be inferred from the FreeType implementation, and
        # we could verify it with two sample GX fonts.
        if tableFormat == 0:
            offsets = [off * 2 for off in offsets]

        return offsets

    @staticmethod
    def compileOffsets_(offsets):
        """Packs a list of offsets into a 'gvar' offset table.

        Returns a pair (bytestring, tableFormat). Bytestring is the
        packed offset table. Format indicates whether the table
        uses short (tableFormat=0) or long (tableFormat=1) integers.
        The returned tableFormat should get packed into the flags field
        of the 'gvar' header.
        """
        assert len(offsets) >= 2
        for i in range(1, len(offsets)):
            assert offsets[i - 1] <= offsets[i]
        if max(offsets) <= 0xFFFF * 2:
            packed = array.array("H", [n >> 1 for n in offsets])
            tableFormat = 0
        else:
            packed = array.array("I", offsets)
            tableFormat = 1
        if sys.byteorder != "big":
            packed.byteswap()
        return (packed.tobytes(), tableFormat)

    def toXML(self, writer, ttFont):
        """Emit version/reserved plus one <glyphVariations> element per
        glyph that actually has variations."""
        writer.simpletag("version", value=self.version)
        writer.newline()
        writer.simpletag("reserved", value=self.reserved)
        writer.newline()
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        for glyphName in ttFont.getGlyphNames():
            variations = self.variations.get(glyphName)
            if not variations:
                continue
            writer.begintag("glyphVariations", glyph=glyphName)
            writer.newline()
            for gvar in variations:
                gvar.toXML(writer, axisTags)
            writer.endtag("glyphVariations")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild self.variations from TTX elements."""
        if name == "version":
            self.version = safeEval(attrs["value"])
        elif name == "reserved":
            self.reserved = safeEval(attrs["value"])
        elif name == "glyphVariations":
            if not hasattr(self, "variations"):
                self.variations = {}
            glyphName = attrs["glyph"]
            glyph = ttFont["glyf"][glyphName]
            numPointsInGlyph = self.getNumPoints_(glyph)
            glyphVariations = []
            for element in content:
                if isinstance(element, tuple):
                    name, attrs, content = element
                    if name == "tuple":
                        gvar = TupleVariation({}, [None] * numPointsInGlyph)
                        glyphVariations.append(gvar)
                        for tupleElement in content:
                            if isinstance(tupleElement, tuple):
                                tupleName, tupleAttrs, tupleContent = tupleElement
                                gvar.fromXML(tupleName, tupleAttrs, tupleContent)
            self.variations[glyphName] = glyphVariations

    @staticmethod
    def getNumPoints_(glyph):
        """Number of variation points for a glyph, including the four
        phantom points (advance/side-bearing points)."""
        NUM_PHANTOM_POINTS = 4

        if glyph.isComposite():
            return len(glyph.components) + NUM_PHANTOM_POINTS
        else:
            # Empty glyphs (eg. space, nonmarkingreturn) have no "coordinates" attribute.
            return len(getattr(glyph, "coordinates", [])) + NUM_PHANTOM_POINTS


def compileGlyph_(variations, pointCount, axisTags, sharedCoordIndices):
    """Compile one glyph's tuple variation store; empty bytes when the
    glyph ends up with no tuple variations."""
    tupleVariationCount, tuples, data = tv.compileTupleVariationStore(
        variations, pointCount, axisTags, sharedCoordIndices
    )
    if tupleVariationCount == 0:
        return b""
    result = [struct.pack(">HH", tupleVariationCount, 4 + len(tuples)), tuples, data]
    if (len(tuples) + len(data)) % 2 != 0:
        result.append(b"\0")  # padding
    return b"".join(result)


def decompileGlyph_(pointCount, sharedTuples, axisTags, data):
    """Decode one glyph's tuple variation store; returns a list of
    TupleVariation objects (empty for truncated/absent data)."""
    if len(data) < 4:
        return []
    tupleVariationCount, offsetToData = struct.unpack(">HH", data[:4])
    return tv.decompileTupleVariationStore(
        "gvar",
        axisTags,
        tupleVariationCount,
        pointCount,
        sharedTuples,
        data,
        4,
        offsetToData,
    )
from fontTools.misc import sstruct
from fontTools.misc.textTools import bytechr, byteord, strjoin
from . import DefaultTable
import array
from collections.abc import Mapping

hdmxHeaderFormat = """
    > # big endian!
    version: H
    numRecords: H
    recordSize: l
"""


class _GlyphnamedList(Mapping):
    """Read-only mapping from glyph name to device width.

    Wraps a raw per-record width array together with the font's reverse
    glyph map so widths can be looked up by glyph name.
    """

    def __init__(self, reverseGlyphOrder, data):
        self._array = data
        self._map = dict(reverseGlyphOrder)

    def __getitem__(self, glyphName):
        return self._array[self._map[glyphName]]

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        return iter(self._map)

    def keys(self):
        return self._map.keys()


class table__h_d_m_x(DefaultTable.DefaultTable):
    """Horizontal device metrics table: per-ppem pixel advance widths."""

    def decompile(self, data, ttFont):
        """Parse the header plus self.numRecords fixed-size device records,
        keyed by ppem size in self.hdmx."""
        numGlyphs = ttFont["maxp"].numGlyphs
        dummy, data = sstruct.unpack2(hdmxHeaderFormat, data, self)
        reverseMap = ttFont.getReverseGlyphMap()
        self.hdmx = {}
        for _ in range(self.numRecords):
            ppem = byteord(data[0])
            # data[1] is maxWidth; it is recomputed on compile, so it is not kept
            self.hdmx[ppem] = _GlyphnamedList(
                reverseMap, array.array("B", data[2 : 2 + numGlyphs])
            )
            data = data[self.recordSize :]
        assert len(data) == 0, "too much hdmx data"

    def compile(self, ttFont):
        """Serialize records in ascending ppem order, each padded to a
        four-byte boundary."""
        self.version = 0
        numGlyphs = ttFont["maxp"].numGlyphs
        glyphOrder = ttFont.getGlyphOrder()
        self.recordSize = 4 * ((2 + numGlyphs + 3) // 4)
        padding = b"\0" * (self.recordSize - 2 - numGlyphs)
        self.numRecords = len(self.hdmx)
        chunks = [sstruct.pack(hdmxHeaderFormat, self)]
        for ppem, widths in sorted(self.hdmx.items()):
            chunks.append(bytechr(ppem))
            chunks.append(bytechr(max(widths.values())))  # record maxWidth
            chunks.extend(bytechr(widths[glyphName]) for glyphName in glyphOrder)
            chunks.append(padding)
        return b"".join(chunks)

    def toXML(self, writer, ttFont):
        """Emit a compact text matrix: one header row of ppem sizes, then
        one 'glyphname: widths ;' row per glyph (sorted by name)."""
        writer.begintag("hdmxData")
        writer.newline()
        ppems = sorted(self.hdmx.keys())
        glyphNames = sorted(ttFont.getGlyphOrder())
        maxNameLen = max(map(len, glyphNames))
        # Name column sized to the longest glyph name; one "%4d" per ppem.
        rowFormat = "%" + repr(maxNameLen) + "s:" + "%4d" * len(ppems) + " ;"
        writer.write(rowFormat % (("ppem",) + tuple(ppems)))
        writer.newline()
        writer.newline()
        for glyphName in glyphNames:
            row = tuple(self.hdmx[ppem][glyphName] for ppem in ppems)
            if ";" in glyphName:
                # ';' is the row terminator, so escape it inside names
                glyphName = "\\x3b".join(glyphName.split(";"))
            writer.write(rowFormat % ((glyphName,) + row))
            writer.newline()
        writer.endtag("hdmxData")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Parse the text matrix written by toXML back into self.hdmx."""
        if name != "hdmxData":
            return
        text = strjoin(content)
        rows = text.split(";")
        header = rows[0].split()
        assert header[0] == "ppem:", "illegal hdmx format"
        ppems = [int(token) for token in header[1:]]
        self.hdmx = {ppem: {} for ppem in ppems}
        for row in (r.split() for r in rows[1:]):
            if not row:
                continue
            assert row[0][-1] == ":", "illegal hdmx format"
            glyphName = row[0][:-1]
            if "\\" in glyphName:
                # undo the '\x3b' escaping applied by toXML
                from fontTools.misc.textTools import safeEval

                glyphName = safeEval('"""' + glyphName + '"""')
            values = [int(token) for token in row[1:]]
            assert len(values) == len(ppems), "illegal hdmx format"
            for ppem, value in zip(ppems, values):
                self.hdmx[ppem][glyphName] = value
import DefaultTable
import struct
import sys
import array
import logging


log = logging.getLogger(__name__)


class table__k_e_r_n(DefaultTable.DefaultTable):
    """Kerning table: a list of kern subtables in self.kernTables.

    Handles both the OpenType layout (version 0, 16-bit header) and the
    AAT/Apple layout (version 1.0, 32-bit header).
    """

    def getkern(self, format):
        """Return the first subtable with the given format, or None."""
        for subtable in self.kernTables:
            if subtable.format == format:
                return subtable
        return None  # not found

    def decompile(self, data, ttFont):
        """Split the binary table into subtables and decompile each one."""
        version, nTables = struct.unpack(">HH", data[:4])
        apple = False
        if (len(data) >= 8) and (version == 1):
            # AAT Apple's "new" format. Hm.
            # version is a 16.16 fixed number in the Apple layout.
            version, nTables = struct.unpack(">LL", data[:8])
            self.version = fi2fl(version, 16)
            data = data[8:]
            apple = True
        else:
            self.version = version
            data = data[4:]
        self.kernTables = []
        for i in range(nTables):
            if self.version == 1.0:
                # Apple
                length, coverage, subtableFormat = struct.unpack(">LBB", data[:6])
            else:
                # in OpenType spec the "version" field refers to the common
                # subtable header; the actual subtable format is stored in
                # the 8-15 mask bits of "coverage" field.
                # This "version" is always 0 so we ignore it here
                _, length, subtableFormat, coverage = struct.unpack(">HHBB", data[:6])
                if nTables == 1 and subtableFormat == 0:
                    # The "length" value is ignored since some fonts
                    # (like OpenSans and Calibri) have a subtable larger than
                    # its value.
                    (nPairs,) = struct.unpack(">H", data[6:8])
                    calculated_length = (nPairs * 6) + 14
                    if length != calculated_length:
                        log.warning(
                            "'kern' subtable longer than defined: "
                            "%d bytes instead of %d bytes" % (calculated_length, length)
                        )
                    length = calculated_length
            if subtableFormat not in kern_classes:
                # preserve unknown subtables as raw bytes
                subtable = KernTable_format_unkown(subtableFormat)
            else:
                subtable = kern_classes[subtableFormat](apple)
            subtable.decompile(data[:length], ttFont)
            self.kernTables.append(subtable)
            data = data[length:]

    def compile(self, ttFont):
        """Write the table header followed by each compiled subtable."""
        if hasattr(self, "kernTables"):
            nTables = len(self.kernTables)
        else:
            nTables = 0
        if self.version == 1.0:
            # AAT Apple's "new" format.
            data = struct.pack(">LL", fl2fi(self.version, 16), nTables)
        else:
            data = struct.pack(">HH", self.version, nTables)
        if hasattr(self, "kernTables"):
            for subtable in self.kernTables:
                data = data + subtable.compile(ttFont)
        return data

    def toXML(self, writer, ttFont):
        writer.simpletag("version", value=self.version)
        writer.newline()
        for subtable in self.kernTables:
            subtable.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild self.kernTables from <version> and <kernsubtable>
        elements."""
        if name == "version":
            self.version = safeEval(attrs["value"])
            return
        if name != "kernsubtable":
            return
        if not hasattr(self, "kernTables"):
            self.kernTables = []
        format = safeEval(attrs["format"])
        if format not in kern_classes:
            subtable = KernTable_format_unkown(format)
        else:
            apple = self.version == 1.0
            subtable = kern_classes[format](apple)
        self.kernTables.append(subtable)
        subtable.fromXML(name, attrs, content, ttFont)


class KernTable_format_0(object):
    """Format 0 kern subtable: a sorted list of (left, right) glyph pairs
    with signed kerning values, exposed as the dict self.kernTable keyed
    by (leftGlyphName, rightGlyphName)."""

    # 'version' is kept for backward compatibility
    version = format = 0

    def __init__(self, apple=False):
        # apple selects the AAT (version 1.0) subtable header layout
        self.apple = apple

    def decompile(self, data, ttFont):
        """Parse the subtable header and the pair list."""
        if not self.apple:
            version, length, subtableFormat, coverage = struct.unpack(">HHBB", data[:6])
            if version != 0:
                from fontTools.ttLib import TTLibError

                raise TTLibError("unsupported kern subtable version: %d" % version)
            tupleIndex = None
            # Should we also assert length == len(data)?
            data = data[6:]
        else:
            length, coverage, subtableFormat, tupleIndex = struct.unpack(
                ">LBBH", data[:8]
            )
            data = data[8:]
        assert self.format == subtableFormat, "unsupported format"
        self.coverage = coverage
        self.tupleIndex = tupleIndex

        self.kernTable = kernTable = {}

        nPairs, searchRange, entrySelector, rangeShift = struct.unpack(
            ">HHHH", data[:8]
        )
        data = data[8:]

        # each pair is three uint16s: left gid, right gid, value
        datas = array.array("H", data[: 6 * nPairs])
        if sys.byteorder != "big":
            datas.byteswap()
        it = iter(datas)
        glyphOrder = ttFont.getGlyphOrder()
        for k in range(nPairs):
            left, right, value = next(it), next(it), next(it)
            # sign-extend the 16-bit value
            if value >= 32768:
                value -= 65536
            try:
                kernTable[(glyphOrder[left], glyphOrder[right])] = value
            except IndexError:
                # Slower, but will not throw an IndexError on an invalid
                # glyph id.
                kernTable[(ttFont.getGlyphName(left), ttFont.getGlyphName(right))] = (
                    value
                )
        if len(data) > 6 * nPairs + 4:  # Ignore up to 4 bytes excess
            log.warning(
                "excess data in 'kern' subtable: %d bytes", len(data) - 6 * nPairs
            )

    def compile(self, ttFont):
        """Serialize the pair list.

        If there are more than 0xFFFF pairs, the pair count and the
        subtable length are truncated but all pairs are still written.
        """
        nPairs = min(len(self.kernTable), 0xFFFF)
        searchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6)
        searchRange &= 0xFFFF
        entrySelector = min(entrySelector, 0xFFFF)
        rangeShift = min(rangeShift, 0xFFFF)
        data = struct.pack(">HHHH", nPairs, searchRange, entrySelector, rangeShift)

        # yeehee! (I mean, turn names into indices)
        try:
            reverseOrder = ttFont.getReverseGlyphMap()
            kernTable = sorted(
                (reverseOrder[left], reverseOrder[right], value)
                for ((left, right), value) in self.kernTable.items()
            )
        except KeyError:
            # Slower, but will not throw KeyError on invalid glyph id.
            getGlyphID = ttFont.getGlyphID
            kernTable = sorted(
                (getGlyphID(left), getGlyphID(right), value)
                for ((left, right), value) in self.kernTable.items()
            )

        for left, right, value in kernTable:
            data = data + struct.pack(">HHh", left, right, value)

        if not self.apple:
            version = 0
            length = len(data) + 6
            if length >= 0x10000:
                log.warning(
                    '"kern" subtable overflow, '
                    "truncating length value while preserving pairs."
                )
                length &= 0xFFFF
            header = struct.pack(">HHBB", version, length, self.format, self.coverage)
        else:
            if self.tupleIndex is None:
                # sensible default when compiling a TTX from an old fonttools
                # or when inserting a Windows-style format 0 subtable into an
                # Apple version=1.0 kern table
                log.warning("'tupleIndex' is None; default to 0")
                self.tupleIndex = 0
            length = len(data) + 8
            header = struct.pack(
                ">LBBH", length, self.coverage, self.format, self.tupleIndex
            )
        return header + data

    def toXML(self, writer, ttFont):
        """Write one <kernsubtable> with a <pair> element per kern pair."""
        attrs = dict(coverage=self.coverage, format=self.format)
        if self.apple:
            if self.tupleIndex is None:
                log.warning("'tupleIndex' is None; default to 0")
                attrs["tupleIndex"] = 0
            else:
                attrs["tupleIndex"] = self.tupleIndex
        writer.begintag("kernsubtable", **attrs)
        writer.newline()
        items = sorted(self.kernTable.items())
        for (left, right), value in items:
            writer.simpletag("pair", [("l", left), ("r", right), ("v", value)])
            writer.newline()
        writer.endtag("kernsubtable")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild self.kernTable from <pair> child elements."""
        self.coverage = safeEval(attrs["coverage"])
        subtableFormat = safeEval(attrs["format"])
        if self.apple:
            if "tupleIndex" in attrs:
                self.tupleIndex = safeEval(attrs["tupleIndex"])
            else:
                # previous fontTools versions didn't export tupleIndex
                log.warning("Apple kern subtable is missing 'tupleIndex' attribute")
                self.tupleIndex = None
        else:
            self.tupleIndex = None
        assert subtableFormat == self.format, "unsupported format"
        if not hasattr(self, "kernTable"):
            self.kernTable = {}
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            self.kernTable[(attrs["l"], attrs["r"])] = safeEval(attrs["v"])

    def __getitem__(self, pair):
        return self.kernTable[pair]

    def __setitem__(self, pair, value):
        self.kernTable[pair] = value

    def __delitem__(self, pair):
        del self.kernTable[pair]


class KernTable_format_unkown(object):
    """Fallback for unrecognized subtable formats: round-trips the raw
    bytes unchanged.  (Class name spelling is kept for API compatibility.)"""

    def __init__(self, format):
        self.format = format

    def decompile(self, data, ttFont):
        self.data = data

    def compile(self, ttFont):
        return self.data

    def toXML(self, writer, ttFont):
        writer.begintag("kernsubtable", format=self.format)
        writer.newline()
        writer.comment("unknown 'kern' subtable format")
        writer.newline()
        writer.dumphex(self.data)
        writer.endtag("kernsubtable")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.decompile(readHex(content), ttFont)


# dispatch table: subtable format -> handler class
kern_classes = {0: KernTable_format_0}
import DefaultTable

maxpFormat_0_5 = """
    > # big endian
    tableVersion: i
    numGlyphs: H
"""

maxpFormat_1_0_add = """
    > # big endian
    maxPoints: H
    maxContours: H
    maxCompositePoints: H
    maxCompositeContours: H
    maxZones: H
    maxTwilightPoints: H
    maxStorage: H
    maxFunctionDefs: H
    maxInstructionDefs: H
    maxStackElements: H
    maxSizeOfInstructions: H
    maxComponentElements: H
    maxComponentDepth: H
"""


class table__m_a_x_p(DefaultTable.DefaultTable):
    """Maximum profile table: global resource limits of the font.

    CFF fonts use version 0x00005000 (header only); TrueType fonts use
    version 0x00010000 and carry the additional 1.0 fields.
    """

    dependencies = ["glyf"]

    def decompile(self, data, ttFont):
        """Read the 0.5 header, plus the 1.0 fields unless this is the
        CFF flavor (version 0x00005000)."""
        dummy, data = sstruct.unpack2(maxpFormat_0_5, data, self)
        self.numGlyphs = int(self.numGlyphs)
        if self.tableVersion != 0x00005000:
            dummy, data = sstruct.unpack2(maxpFormat_1_0_add, data, self)
        assert len(data) == 0

    def compile(self, ttFont):
        """Write the table, recalculating statistics first when a loaded
        'glyf' table is present and recalcBBoxes is enabled; CFF fonts
        keep the bare 0.5 header."""
        if "glyf" in ttFont and ttFont.isLoaded("glyf") and ttFont.recalcBBoxes:
            self.recalc(ttFont)
        self.numGlyphs = len(ttFont.getGlyphOrder())
        if self.tableVersion != 0x00005000:
            self.tableVersion = 0x00010000
        data = sstruct.pack(maxpFormat_0_5, self)
        if self.tableVersion == 0x00010000:
            data += sstruct.pack(maxpFormat_1_0_add, self)
        return data

    def recalc(self, ttFont):
        """Recalculate the font bounding box, and most other maxp values except
        for the TT instructions values. Also recalculate the value of bit 1
        of the flags field and the font bounding box of the 'head' table.
        """
        glyfTable = ttFont["glyf"]
        hmtxTable = ttFont["hmtx"]
        headTable = ttFont["head"]
        self.numGlyphs = len(glyfTable)
        INFINITY = 100000
        xMin = yMin = +INFINITY
        xMax = yMax = -INFINITY
        maxPoints = maxContours = 0
        maxCompositePoints = maxCompositeContours = 0
        maxComponentElements = maxComponentDepth = 0
        allXMinIsLsb = 1
        for glyphName in ttFont.getGlyphOrder():
            glyph = glyfTable[glyphName]
            if glyph.numberOfContours:  # non-empty: simple (>0) or composite (-1)
                # head flag bit 1 may only be set when every glyph's lsb
                # equals its xMin
                if hmtxTable[glyphName][1] != glyph.xMin:
                    allXMinIsLsb = 0
                xMin = min(xMin, glyph.xMin)
                yMin = min(yMin, glyph.yMin)
                xMax = max(xMax, glyph.xMax)
                yMax = max(yMax, glyph.yMax)
                if glyph.numberOfContours > 0:
                    nPoints, nContours = glyph.getMaxpValues()
                    maxPoints = max(maxPoints, nPoints)
                    maxContours = max(maxContours, nContours)
                elif glyph.isComposite():
                    nPoints, nContours, componentDepth = glyph.getCompositeMaxpValues(
                        glyfTable
                    )
                    maxCompositePoints = max(maxCompositePoints, nPoints)
                    maxCompositeContours = max(maxCompositeContours, nContours)
                    maxComponentElements = max(
                        maxComponentElements, len(glyph.components)
                    )
                    maxComponentDepth = max(maxComponentDepth, componentDepth)
        if xMin == +INFINITY:
            # no outlines at all: an all-zero bounding box
            headTable.xMin = 0
            headTable.yMin = 0
            headTable.xMax = 0
            headTable.yMax = 0
        else:
            headTable.xMin = xMin
            headTable.yMin = yMin
            headTable.xMax = xMax
            headTable.yMax = yMax
        self.maxPoints = maxPoints
        self.maxContours = maxContours
        self.maxCompositePoints = maxCompositePoints
        self.maxCompositeContours = maxCompositeContours
        self.maxComponentElements = maxComponentElements
        self.maxComponentDepth = maxComponentDepth
        if allXMinIsLsb:
            headTable.flags = headTable.flags | 0x2
        else:
            headTable.flags = headTable.flags & ~0x2

    def testrepr(self):
        """Debug helper: dump the instance attributes to stdout."""
        print(". . . . . . . . .")
        for combo in sorted(self.__dict__.items()):
            print("  %s: %s" % combo)
        print(". . . . . . . . .")

    def toXML(self, writer, ttFont):
        """Emit one simpletag per maxp field; tableVersion is shown in hex."""
        fullTable = self.tableVersion != 0x00005000
        if fullTable:
            writer.comment("Most of this table will be recalculated by the compiler")
            writer.newline()
        formatstring, names, fixes = sstruct.getformat(maxpFormat_0_5)
        if fullTable:
            formatstring, names_1_0, fixes = sstruct.getformat(maxpFormat_1_0_add)
            # merge into a fresh dict; getformat results may be cached
            names = {**names, **names_1_0}
        for fieldName in names:
            value = getattr(self, fieldName)
            writer.simpletag(
                fieldName, value=hex(value) if fieldName == "tableVersion" else value
            )
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        setattr(self, name, safeEval(attrs["value"]))


from .otBase import BaseTTXConverter


# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6mort.html
class table__m_o_r_t(BaseTTXConverter):
    """AAT glyph metamorphosis table (predecessor of 'morx')."""


# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html
class table__m_o_r_x(BaseTTXConverter):
    """AAT extended glyph metamorphosis table."""
# -*- coding: utf-8 -*-
from fontTools.misc import sstruct
from fontTools.misc.textTools import (
    bytechr,
    byteord,
    bytesjoin,
    strjoin,
    tobytes,
    tostr,
    safeEval,
)
from fontTools.misc.encodingTools import getEncoding
from fontTools.ttLib import newTable
from fontTools.ttLib.ttVisitor import TTVisitor
from fontTools import ttLib
import fontTools.ttLib.tables.otTables as otTables
from fontTools.ttLib.tables import C_P_A_L_
from . import DefaultTable
import struct
import logging


log = logging.getLogger(__name__)

nameRecordFormat = """
    > # big endian
    platformID: H
    platEncID: H
    langID: H
    nameID: H
    length: H
    offset: H
"""

nameRecordSize = sstruct.calcsize(nameRecordFormat)


class table__n_a_m_e(DefaultTable.DefaultTable):
    """Naming table: the list of NameRecord objects in self.names."""

    dependencies = ["ltag"]

    def decompile(self, data, ttFont):
        """Parse the record directory and attach each record's raw string
        bytes; malformed records are logged and skipped."""
        format, n, stringOffset = struct.unpack(b">HHH", data[:6])
        expectedStringOffset = 6 + n * nameRecordSize
        if stringOffset != expectedStringOffset:
            log.error(
                "'name' table stringOffset incorrect. Expected: %s; Actual: %s",
                expectedStringOffset,
                stringOffset,
            )
        stringData = data[stringOffset:]
        data = data[6:]
        self.names = []
        for i in range(n):
            if len(data) < 12:
                log.error("skipping malformed name record #%d", i)
                continue
            name, data = sstruct.unpack2(nameRecordFormat, data, NameRecord())
            # the slice below cannot fail; the bounds check afterwards
            # discards records whose string would be truncated
            name.string = stringData[name.offset : name.offset + name.length]
            if name.offset + name.length > len(stringData):
                log.error("skipping malformed name record #%d", i)
                continue
            assert len(name.string) == name.length
            # if (name.platEncID, name.platformID) in ((0, 0), (1, 3)):
            # 	if len(name.string) % 2:
            # 		print "2-byte string doesn't have even length!"
            # 		print name.__dict__
            del name.offset, name.length
            self.names.append(name)

    def compile(self, ttFont):
        """Serialize the records (spec-sorted) followed by the string pool;
        identical strings are shared via the 'done' cache."""
        if not hasattr(self, "names"):
            # only happens when there are NO name table entries read
            # from the TTX file
            self.names = []
        names = self.names
        names.sort()  # sort according to the spec; see NameRecord.__lt__()
        stringData = b""
        format = 0
        n = len(names)
        stringOffset = 6 + n * sstruct.calcsize(nameRecordFormat)
        data = struct.pack(b">HHH", format, n, stringOffset)
        lastoffset = 0  # NOTE(review): unused; kept as-is
        done = {}  # remember the data so we can reuse the "pointers"
        for name in names:
            string = name.toBytes()
            if string in done:
                name.offset, name.length = done[string]
            else:
                name.offset, name.length = done[string] = len(stringData), len(string)
                stringData = bytesjoin([stringData, string])
            data = data + sstruct.pack(nameRecordFormat, name)
        return data + stringData

    def toXML(self, writer, ttFont):
        for name in self.names:
            name.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        if name != "namerecord":
            return  # ignore unknown tags
        if not hasattr(self, "names"):
            self.names = []
        # NOTE(review): 'name' is rebound from the tag string to the new
        # record, and the record is passed as the first argument below;
        # confirm NameRecord.fromXML ignores that argument.
        name = NameRecord()
        self.names.append(name)
        name.fromXML(name, attrs, content, ttFont)

    def getName(self, nameID, platformID, platEncID, langID=None):
        """Return the first record matching the given IDs (any language
        when langID is None), or None."""
        for namerecord in self.names:
            if (
                namerecord.nameID == nameID
                and namerecord.platformID == platformID
                and namerecord.platEncID == platEncID
            ):
                if langID is None or namerecord.langID == langID:
                    return namerecord
        return None  # not found

    def getDebugName(self, nameID):
        """Return a human-readable string for nameID, preferring English
        (Mac-Roman or Windows en-US); undecodable records are skipped."""
        englishName = someName = None
        for name in self.names:
            if name.nameID != nameID:
                continue
            try:
                unistr = name.toUnicode()
            except UnicodeDecodeError:
                continue

            someName = unistr
            if (name.platformID, name.langID) in ((1, 0), (3, 0x409)):
                englishName = unistr
                break
        if englishName:
            return englishName
        elif someName:
            return someName
        else:
            return None

    def getFirstDebugName(self, nameIDs):
        """Return the debug name for the first nameID in nameIDs that has
        one, or None."""
        for nameID in nameIDs:
            name = self.getDebugName(nameID)
            if name is not None:
                return name
        return None

    def getBestFamilyName(self):
        # 21 = WWS Family Name
        # 16 = Typographic Family Name
        # 1 = Family Name
        return self.getFirstDebugName((21, 16, 1))

    def getBestSubFamilyName(self):
        # 22 = WWS SubFamily Name
        # 17 = Typographic SubFamily Name
        # 2 = SubFamily Name
        return self.getFirstDebugName((22, 17, 2))

    def getBestFullName(self):
        # 4 = Full Name
        # 6 = PostScript Name
        for nameIDs in ((21, 22), (16, 17), (1, 2), (4,), (6,)):
            if len(nameIDs) == 2:
                # family + subfamily pair; "Regular" subfamily is omitted
                name_fam = self.getDebugName(nameIDs[0])
                name_subfam = self.getDebugName(nameIDs[1])
                if None in [name_fam, name_subfam]:
                    continue  # if any is None, skip
                name = f"{name_fam} {name_subfam}"
                if name_subfam.lower() == "regular":
                    name = f"{name_fam}"
                return name
            else:
                name = self.getDebugName(nameIDs[0])
                if name is not None:
                    return name
        return None

    def setName(self, string, nameID, platformID, platEncID, langID):
        """Set the 'string' for the name record identified by 'nameID', 'platformID',
        'platEncID' and 'langID'. If a record with that nameID doesn't exist, create it
        and append to the name table.

        'string' can be of type `str` (`unicode` in PY2) or `bytes`. In the latter case,
        it is assumed to be already encoded with the correct platform-specific encoding
        identified by the (platformID, platEncID, langID) triplet. A warning is issued
        to prevent unexpected results.
        """
        if not hasattr(self, "names"):
            self.names = []
        if not isinstance(string, str):
            if isinstance(string, bytes):
                log.warning(
                    "name string is bytes, ensure it's correctly encoded: %r", string
                )
            else:
                raise TypeError(
                    "expected unicode or bytes, found %s: %r"
                    % (type(string).__name__, string)
                )
        namerecord = self.getName(nameID, platformID, platEncID, langID)
        if namerecord:
            namerecord.string = string
        else:
            self.names.append(makeName(string, nameID, platformID, platEncID, langID))

    def removeNames(self, nameID=None, platformID=None, platEncID=None, langID=None):
        """Remove any name records identified by the given combination of 'nameID',
        'platformID', 'platEncID' and 'langID'.
        """
        # only filter on the arguments that were actually provided
        args = {
            argName: argValue
            for argName, argValue in (
                ("nameID", nameID),
                ("platformID", platformID),
                ("platEncID", platEncID),
                ("langID", langID),
            )
            if argValue is not None
        }
        if not args:
            # no arguments, nothing to do
            return
        self.names = [
            rec
            for rec in self.names
            if any(
                argValue != getattr(rec, argName) for argName, argValue in args.items()
            )
        ]

    @staticmethod
    def removeUnusedNames(ttFont):
        """Remove any name records which are not in NameID range 0-255 and not utilized
        within the font itself."""
        visitor = NameRecordVisitor()
        visitor.visit(ttFont)
        toDelete = set()
        for record in ttFont["name"].names:
            # Name IDs 26 to 255, inclusive, are reserved for future standard names.
            # https://learn.microsoft.com/en-us/typography/opentype/spec/name#name-ids
            if record.nameID < 256:
                continue
            if record.nameID not in visitor.seen:
                toDelete.add(record.nameID)

        for nameID in toDelete:
            ttFont["name"].removeNames(nameID)
        return toDelete

    def _findUnusedNameID(self, minNameID=256):
        """Finds an unused name id.

        The nameID is assigned in the range between 'minNameID' and 32767 (inclusive),
        following the last nameID in the name table.
        """
        names = getattr(self, "names", [])
        nameID = 1 + max([n.nameID for n in names] + [minNameID - 1])
        if nameID > 32767:
            raise ValueError("nameID must be less than 32768")
        return nameID

    def findMultilingualName(
        self, names, windows=True, mac=True, minNameID=0, ttFont=None
    ):
        """Return the name ID of an existing multilingual name that
        matches the 'names' dictionary, or None if not found.

        'names' is a dictionary with the name in multiple languages,
        such as {'en': 'Pale', 'de': 'Blaß', 'de-CH': 'Blass'}.
        The keys can be arbitrary IETF BCP 47 language codes;
        the values are Unicode strings.

        If 'windows' is True, the returned name ID is guaranteed
        exist for all requested languages for platformID=3 and
        platEncID=1.
        If 'mac' is True, the returned name ID is guaranteed to exist
        for all requested languages for platformID=1 and platEncID=0.

        The returned name ID will not be less than the 'minNameID'
        argument.
        """
        # Gather the set of requested
        # (string, platformID, platEncID, langID)
        # tuples
        reqNameSet = set()
        for lang, name in sorted(names.items()):
            if windows:
                windowsName = _makeWindowsName(name, None, lang)
                if windowsName is not None:
                    reqNameSet.add(
                        (
                            windowsName.string,
                            windowsName.platformID,
                            windowsName.platEncID,
                            windowsName.langID,
                        )
                    )
            if mac:
                macName = _makeMacName(name, None, lang, ttFont)
                if macName is not None:
                    reqNameSet.add(
                        (
                            macName.string,
                            macName.platformID,
                            macName.platEncID,
                            macName.langID,
                        )
                    )

        # Collect matching name IDs
        matchingNames = dict()
        for name in self.names:
            try:
                key = (name.toUnicode(), name.platformID, name.platEncID, name.langID)
            except UnicodeDecodeError:
                continue
            if key in reqNameSet and name.nameID >= minNameID:
                nameSet = matchingNames.setdefault(name.nameID, set())
                nameSet.add(key)

        # Return the first name ID that defines all requested strings
        for nameID, nameSet in sorted(matchingNames.items()):
            if nameSet == reqNameSet:
                return nameID

        return None  # not found

    def addMultilingualName(
        self, names, ttFont=None, nameID=None, windows=True, mac=True, minNameID=0
    ):
        """Add a multilingual name, returning its name ID

        'names' is a dictionary with the name in multiple languages,
        such as {'en': 'Pale', 'de': 'Blaß', 'de-CH': 'Blass'}.
        The keys can be arbitrary IETF BCP 47 language codes;
        the values are Unicode strings.

        'ttFont' is the TTFont to which the names are added, or None.
        If present, the font's 'ltag' table can get populated
        to store exotic language codes, which allows encoding
        names that otherwise cannot get encoded at all.

        'nameID' is the name ID to be used, or None to let the library
        find an existing set of name records that match, or pick an
        unused name ID.

        If 'windows' is True, a platformID=3 name record will be added.
        If 'mac' is True, a platformID=1 name record will be added.

        If the 'nameID' argument is None, the created nameID will not
        be less than the 'minNameID' argument.
        """
        if not hasattr(self, "names"):
            self.names = []
        if nameID is None:
            # Reuse nameID if possible
            nameID = self.findMultilingualName(
                names, windows=windows, mac=mac, minNameID=minNameID, ttFont=ttFont
            )
            if nameID is not None:
                return nameID
            nameID = self._findUnusedNameID()
        # TODO: Should minimize BCP 47 language codes.
        # https://github.com/fonttools/fonttools/issues/930
        for lang, name in sorted(names.items()):
            if windows:
                windowsName = _makeWindowsName(name, nameID, lang)
                if windowsName is not None:
                    self.names.append(windowsName)
                else:
                    # We could not make a Windows name: make sure we add a
                    # Mac name as a fallback. This can happen for exotic
                    # BCP47 language tags that have no Windows language code.
                    mac = True
            if mac:
                macName = _makeMacName(name, nameID, lang, ttFont)
                if macName is not None:
                    self.names.append(macName)
        return nameID

    def addName(self, string, platforms=((1, 0, 0), (3, 1, 0x409)), minNameID=255):
        """Add a new name record containing 'string' for each (platformID, platEncID,
        langID) tuple specified in the 'platforms' list.

        The nameID is assigned in the range between 'minNameID'+1 and 32767 (inclusive),
        following the last nameID in the name table.
        If no 'platforms' are specified, two English name records are added, one for the
        Macintosh (platformID=0), and one for the Windows platform (3).

        The 'string' must be a Unicode string, so it can be encoded with different,
        platform-specific encodings.

        Return the new nameID.
        """
        assert (
            len(platforms) > 0
        ), "'platforms' must contain at least one (platformID, platEncID, langID) tuple"
        if not hasattr(self, "names"):
            self.names = []
        if not isinstance(string, str):
            raise TypeError(
                "expected str, found %s: %r" % (type(string).__name__, string)
            )
        nameID = self._findUnusedNameID(minNameID + 1)
        for platformID, platEncID, langID in platforms:
            self.names.append(makeName(string, nameID, platformID, platEncID, langID))
        return nameID


def makeName(string, nameID, platformID, platEncID, langID):
    """Construct a NameRecord with the given identifiers and string."""
    name = NameRecord()
    name.string, name.nameID, name.platformID, name.platEncID, name.langID = (
        string,
        nameID,
        platformID,
        platEncID,
        langID,
    )
    return name
+ """ + langID = _WINDOWS_LANGUAGE_CODES.get(language.lower()) + if langID is not None: + return makeName(name, nameID, 3, 1, langID) + else: + log.warning( + "cannot add Windows name in language %s " + "because fonttools does not yet support " + "name table format 1" % language + ) + return None + + +def _makeMacName(name, nameID, language, font=None): + """Create a NameRecord for Apple platforms + + 'language' is an arbitrary IETF BCP 47 language identifier such + as 'en', 'de-CH', 'de-AT-1901', or 'fa-Latn'. When possible, we + create a Macintosh NameRecord that is understood by old applications + (platform ID 1 and an old-style Macintosh language enum). If this + is not possible, we create a Unicode NameRecord (platform ID 0) + whose language points to the font’s 'ltag' table. The latter + can encode any string in any language, but legacy applications + might not recognize the format (in which case they will ignore + those names). + + 'font' should be the TTFont for which you want to create a name. + If 'font' is None, we only return NameRecords for legacy Macintosh; + in that case, the result will be None for names that need to + be encoded with an 'ltag' table. + + See the section “The language identifier” in Apple’s specification: + https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html + """ + macLang = _MAC_LANGUAGE_CODES.get(language.lower()) + macScript = _MAC_LANGUAGE_TO_SCRIPT.get(macLang) + if macLang is not None and macScript is not None: + encoding = getEncoding(1, macScript, macLang, default="ascii") + # Check if we can actually encode this name. If we can't, + # for example because we have no support for the legacy + # encoding, or because the name string contains Unicode + # characters that the legacy encoding cannot represent, + # we fall back to encoding the name in Unicode and put + # the language tag into the ltag table. 
+ try: + _ = tobytes(name, encoding, errors="strict") + return makeName(name, nameID, 1, macScript, macLang) + except UnicodeEncodeError: + pass + if font is not None: + ltag = font.tables.get("ltag") + if ltag is None: + ltag = font["ltag"] = newTable("ltag") + # 0 = Unicode; 4 = “Unicode 2.0 or later semantics (non-BMP characters allowed)” + # “The preferred platform-specific code for Unicode would be 3 or 4.” + # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6name.html + return makeName(name, nameID, 0, 4, ltag.addTag(language)) + else: + log.warning( + "cannot store language %s into 'ltag' table " + "without having access to the TTFont object" % language + ) + return None + + +class NameRecord(object): + def getEncoding(self, default="ascii"): + """Returns the Python encoding name for this name entry based on its platformID, + platEncID, and langID. If encoding for these values is not known, by default + 'ascii' is returned. That can be overriden by passing a value to the default + argument. + """ + return getEncoding(self.platformID, self.platEncID, self.langID, default) + + def encodingIsUnicodeCompatible(self): + return self.getEncoding(None) in ["utf_16_be", "ucs2be", "ascii", "latin1"] + + def __str__(self): + return self.toStr(errors="backslashreplace") + + def isUnicode(self): + return self.platformID == 0 or ( + self.platformID == 3 and self.platEncID in [0, 1, 10] + ) + + def toUnicode(self, errors="strict"): + """ + If self.string is a Unicode string, return it; otherwise try decoding the + bytes in self.string to a Unicode string using the encoding of this + entry as returned by self.getEncoding(); Note that self.getEncoding() + returns 'ascii' if the encoding is unknown to the library. 
+ + Certain heuristics are performed to recover data from bytes that are + ill-formed in the chosen encoding, or that otherwise look misencoded + (mostly around bad UTF-16BE encoded bytes, or bytes that look like UTF-16BE + but marked otherwise). If the bytes are ill-formed and the heuristics fail, + the error is handled according to the errors parameter to this function, which is + passed to the underlying decode() function; by default it throws a + UnicodeDecodeError exception. + + Note: The mentioned heuristics mean that roundtripping a font to XML and back + to binary might recover some misencoded data whereas just loading the font + and saving it back will not change them. + """ + + def isascii(b): + return (b >= 0x20 and b <= 0x7E) or b in [0x09, 0x0A, 0x0D] + + encoding = self.getEncoding() + string = self.string + + if ( + isinstance(string, bytes) + and encoding == "utf_16_be" + and len(string) % 2 == 1 + ): + # Recover badly encoded UTF-16 strings that have an odd number of bytes: + # - If the last byte is zero, drop it. Otherwise, + # - If all the odd bytes are zero and all the even bytes are ASCII, + # prepend one zero byte. Otherwise, + # - If first byte is zero and all other bytes are ASCII, insert zero + # bytes between consecutive ASCII bytes. + # + # (Yes, I've seen all of these in the wild... sigh) + if byteord(string[-1]) == 0: + string = string[:-1] + elif all( + byteord(b) == 0 if i % 2 else isascii(byteord(b)) + for i, b in enumerate(string) + ): + string = b"\0" + string + elif byteord(string[0]) == 0 and all( + isascii(byteord(b)) for b in string[1:] + ): + string = bytesjoin(b"\0" + bytechr(byteord(b)) for b in string[1:]) + + string = tostr(string, encoding=encoding, errors=errors) + + # If decoded strings still looks like UTF-16BE, it suggests a double-encoding. + # Fix it up. 
+ if all( + ord(c) == 0 if i % 2 == 0 else isascii(ord(c)) for i, c in enumerate(string) + ): + # If string claims to be Mac encoding, but looks like UTF-16BE with ASCII text, + # narrow it down. + string = "".join(c for c in string[1::2]) + + return string + + def toBytes(self, errors="strict"): + """If self.string is a bytes object, return it; otherwise try encoding + the Unicode string in self.string to bytes using the encoding of this + entry as returned by self.getEncoding(); Note that self.getEncoding() + returns 'ascii' if the encoding is unknown to the library. + + If the Unicode string cannot be encoded to bytes in the chosen encoding, + the error is handled according to the errors parameter to this function, + which is passed to the underlying encode() function; by default it throws a + UnicodeEncodeError exception. + """ + return tobytes(self.string, encoding=self.getEncoding(), errors=errors) + + toStr = toUnicode + + def toXML(self, writer, ttFont): + try: + unistr = self.toUnicode() + except UnicodeDecodeError: + unistr = None + attrs = [ + ("nameID", self.nameID), + ("platformID", self.platformID), + ("platEncID", self.platEncID), + ("langID", hex(self.langID)), + ] + + if unistr is None or not self.encodingIsUnicodeCompatible(): + attrs.append(("unicode", unistr is not None)) + + writer.begintag("namerecord", attrs) + writer.newline() + if unistr is not None: + writer.write(unistr) + else: + writer.write8bit(self.string) + writer.newline() + writer.endtag("namerecord") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + self.nameID = safeEval(attrs["nameID"]) + self.platformID = safeEval(attrs["platformID"]) + self.platEncID = safeEval(attrs["platEncID"]) + self.langID = safeEval(attrs["langID"]) + s = strjoin(content).strip() + encoding = self.getEncoding() + if self.encodingIsUnicodeCompatible() or safeEval( + attrs.get("unicode", "False") + ): + self.string = s.encode(encoding) + else: + # This is the inverse of write8bit... 
+ self.string = s.encode("latin1") + + def __lt__(self, other): + if type(self) != type(other): + return NotImplemented + + try: + selfTuple = ( + self.platformID, + self.platEncID, + self.langID, + self.nameID, + ) + otherTuple = ( + other.platformID, + other.platEncID, + other.langID, + other.nameID, + ) + except AttributeError: + # This can only happen for + # 1) an object that is not a NameRecord, or + # 2) an unlikely incomplete NameRecord object which has not been + # fully populated + return NotImplemented + + try: + # Include the actual NameRecord string in the comparison tuples + selfTuple = selfTuple + (self.toBytes(),) + otherTuple = otherTuple + (other.toBytes(),) + except UnicodeEncodeError as e: + # toBytes caused an encoding error in either of the two, so content + # to sorting based on IDs only + log.error("NameRecord sorting failed to encode: %s" % e) + + # Implemented so that list.sort() sorts according to the spec by using + # the order of the tuple items and their comparison + return selfTuple < otherTuple + + def __repr__(self): + return "" % ( + self.nameID, + self.platformID, + self.langID, + ) + + +# Windows language ID → IETF BCP-47 language tag +# +# While Microsoft indicates a region/country for all its language +# IDs, we follow Unicode practice by omitting “most likely subtags” +# as per Unicode CLDR. For example, English is simply “en” and not +# “en-Latn” because according to Unicode, the default script +# for English is Latin. 
+# +# http://www.unicode.org/cldr/charts/latest/supplemental/likely_subtags.html +# http://www.iana.org/assignments/language-subtag-registry/language-subtag-registry +_WINDOWS_LANGUAGES = { + 0x0436: "af", + 0x041C: "sq", + 0x0484: "gsw", + 0x045E: "am", + 0x1401: "ar-DZ", + 0x3C01: "ar-BH", + 0x0C01: "ar", + 0x0801: "ar-IQ", + 0x2C01: "ar-JO", + 0x3401: "ar-KW", + 0x3001: "ar-LB", + 0x1001: "ar-LY", + 0x1801: "ary", + 0x2001: "ar-OM", + 0x4001: "ar-QA", + 0x0401: "ar-SA", + 0x2801: "ar-SY", + 0x1C01: "aeb", + 0x3801: "ar-AE", + 0x2401: "ar-YE", + 0x042B: "hy", + 0x044D: "as", + 0x082C: "az-Cyrl", + 0x042C: "az", + 0x046D: "ba", + 0x042D: "eu", + 0x0423: "be", + 0x0845: "bn", + 0x0445: "bn-IN", + 0x201A: "bs-Cyrl", + 0x141A: "bs", + 0x047E: "br", + 0x0402: "bg", + 0x0403: "ca", + 0x0C04: "zh-HK", + 0x1404: "zh-MO", + 0x0804: "zh", + 0x1004: "zh-SG", + 0x0404: "zh-TW", + 0x0483: "co", + 0x041A: "hr", + 0x101A: "hr-BA", + 0x0405: "cs", + 0x0406: "da", + 0x048C: "prs", + 0x0465: "dv", + 0x0813: "nl-BE", + 0x0413: "nl", + 0x0C09: "en-AU", + 0x2809: "en-BZ", + 0x1009: "en-CA", + 0x2409: "en-029", + 0x4009: "en-IN", + 0x1809: "en-IE", + 0x2009: "en-JM", + 0x4409: "en-MY", + 0x1409: "en-NZ", + 0x3409: "en-PH", + 0x4809: "en-SG", + 0x1C09: "en-ZA", + 0x2C09: "en-TT", + 0x0809: "en-GB", + 0x0409: "en", + 0x3009: "en-ZW", + 0x0425: "et", + 0x0438: "fo", + 0x0464: "fil", + 0x040B: "fi", + 0x080C: "fr-BE", + 0x0C0C: "fr-CA", + 0x040C: "fr", + 0x140C: "fr-LU", + 0x180C: "fr-MC", + 0x100C: "fr-CH", + 0x0462: "fy", + 0x0456: "gl", + 0x0437: "ka", + 0x0C07: "de-AT", + 0x0407: "de", + 0x1407: "de-LI", + 0x1007: "de-LU", + 0x0807: "de-CH", + 0x0408: "el", + 0x046F: "kl", + 0x0447: "gu", + 0x0468: "ha", + 0x040D: "he", + 0x0439: "hi", + 0x040E: "hu", + 0x040F: "is", + 0x0470: "ig", + 0x0421: "id", + 0x045D: "iu", + 0x085D: "iu-Latn", + 0x083C: "ga", + 0x0434: "xh", + 0x0435: "zu", + 0x0410: "it", + 0x0810: "it-CH", + 0x0411: "ja", + 0x044B: "kn", + 0x043F: "kk", + 0x0453: "km", + 
0x0486: "quc", + 0x0487: "rw", + 0x0441: "sw", + 0x0457: "kok", + 0x0412: "ko", + 0x0440: "ky", + 0x0454: "lo", + 0x0426: "lv", + 0x0427: "lt", + 0x082E: "dsb", + 0x046E: "lb", + 0x042F: "mk", + 0x083E: "ms-BN", + 0x043E: "ms", + 0x044C: "ml", + 0x043A: "mt", + 0x0481: "mi", + 0x047A: "arn", + 0x044E: "mr", + 0x047C: "moh", + 0x0450: "mn", + 0x0850: "mn-CN", + 0x0461: "ne", + 0x0414: "nb", + 0x0814: "nn", + 0x0482: "oc", + 0x0448: "or", + 0x0463: "ps", + 0x0415: "pl", + 0x0416: "pt", + 0x0816: "pt-PT", + 0x0446: "pa", + 0x046B: "qu-BO", + 0x086B: "qu-EC", + 0x0C6B: "qu", + 0x0418: "ro", + 0x0417: "rm", + 0x0419: "ru", + 0x243B: "smn", + 0x103B: "smj-NO", + 0x143B: "smj", + 0x0C3B: "se-FI", + 0x043B: "se", + 0x083B: "se-SE", + 0x203B: "sms", + 0x183B: "sma-NO", + 0x1C3B: "sms", + 0x044F: "sa", + 0x1C1A: "sr-Cyrl-BA", + 0x0C1A: "sr", + 0x181A: "sr-Latn-BA", + 0x081A: "sr-Latn", + 0x046C: "nso", + 0x0432: "tn", + 0x045B: "si", + 0x041B: "sk", + 0x0424: "sl", + 0x2C0A: "es-AR", + 0x400A: "es-BO", + 0x340A: "es-CL", + 0x240A: "es-CO", + 0x140A: "es-CR", + 0x1C0A: "es-DO", + 0x300A: "es-EC", + 0x440A: "es-SV", + 0x100A: "es-GT", + 0x480A: "es-HN", + 0x080A: "es-MX", + 0x4C0A: "es-NI", + 0x180A: "es-PA", + 0x3C0A: "es-PY", + 0x280A: "es-PE", + 0x500A: "es-PR", + # Microsoft has defined two different language codes for + # “Spanish with modern sorting” and “Spanish with traditional + # sorting”. This makes sense for collation APIs, and it would be + # possible to express this in BCP 47 language tags via Unicode + # extensions (eg., “es-u-co-trad” is “Spanish with traditional + # sorting”). However, for storing names in fonts, this distinction + # does not make sense, so we use “es” in both cases. 
+ 0x0C0A: "es", + 0x040A: "es", + 0x540A: "es-US", + 0x380A: "es-UY", + 0x200A: "es-VE", + 0x081D: "sv-FI", + 0x041D: "sv", + 0x045A: "syr", + 0x0428: "tg", + 0x085F: "tzm", + 0x0449: "ta", + 0x0444: "tt", + 0x044A: "te", + 0x041E: "th", + 0x0451: "bo", + 0x041F: "tr", + 0x0442: "tk", + 0x0480: "ug", + 0x0422: "uk", + 0x042E: "hsb", + 0x0420: "ur", + 0x0843: "uz-Cyrl", + 0x0443: "uz", + 0x042A: "vi", + 0x0452: "cy", + 0x0488: "wo", + 0x0485: "sah", + 0x0478: "ii", + 0x046A: "yo", +} + + +_MAC_LANGUAGES = { + 0: "en", + 1: "fr", + 2: "de", + 3: "it", + 4: "nl", + 5: "sv", + 6: "es", + 7: "da", + 8: "pt", + 9: "no", + 10: "he", + 11: "ja", + 12: "ar", + 13: "fi", + 14: "el", + 15: "is", + 16: "mt", + 17: "tr", + 18: "hr", + 19: "zh-Hant", + 20: "ur", + 21: "hi", + 22: "th", + 23: "ko", + 24: "lt", + 25: "pl", + 26: "hu", + 27: "es", + 28: "lv", + 29: "se", + 30: "fo", + 31: "fa", + 32: "ru", + 33: "zh", + 34: "nl-BE", + 35: "ga", + 36: "sq", + 37: "ro", + 38: "cz", + 39: "sk", + 40: "sl", + 41: "yi", + 42: "sr", + 43: "mk", + 44: "bg", + 45: "uk", + 46: "be", + 47: "uz", + 48: "kk", + 49: "az-Cyrl", + 50: "az-Arab", + 51: "hy", + 52: "ka", + 53: "mo", + 54: "ky", + 55: "tg", + 56: "tk", + 57: "mn-CN", + 58: "mn", + 59: "ps", + 60: "ks", + 61: "ku", + 62: "sd", + 63: "bo", + 64: "ne", + 65: "sa", + 66: "mr", + 67: "bn", + 68: "as", + 69: "gu", + 70: "pa", + 71: "or", + 72: "ml", + 73: "kn", + 74: "ta", + 75: "te", + 76: "si", + 77: "my", + 78: "km", + 79: "lo", + 80: "vi", + 81: "id", + 82: "tl", + 83: "ms", + 84: "ms-Arab", + 85: "am", + 86: "ti", + 87: "om", + 88: "so", + 89: "sw", + 90: "rw", + 91: "rn", + 92: "ny", + 93: "mg", + 94: "eo", + 128: "cy", + 129: "eu", + 130: "ca", + 131: "la", + 132: "qu", + 133: "gn", + 134: "ay", + 135: "tt", + 136: "ug", + 137: "dz", + 138: "jv", + 139: "su", + 140: "gl", + 141: "af", + 142: "br", + 143: "iu", + 144: "gd", + 145: "gv", + 146: "ga", + 147: "to", + 148: "el-polyton", + 149: "kl", + 150: "az", + 151: "nn", +} + + 
+_WINDOWS_LANGUAGE_CODES = { + lang.lower(): code for code, lang in _WINDOWS_LANGUAGES.items() +} +_MAC_LANGUAGE_CODES = {lang.lower(): code for code, lang in _MAC_LANGUAGES.items()} + + +# MacOS language ID → MacOS script ID +# +# Note that the script ID is not sufficient to determine what encoding +# to use in TrueType files. For some languages, MacOS used a modification +# of a mainstream script. For example, an Icelandic name would be stored +# with smRoman in the TrueType naming table, but the actual encoding +# is a special Icelandic version of the normal Macintosh Roman encoding. +# As another example, Inuktitut uses an 8-bit encoding for Canadian Aboriginal +# Syllables but MacOS had run out of available script codes, so this was +# done as a (pretty radical) “modification” of Ethiopic. +# +# http://unicode.org/Public/MAPPINGS/VENDORS/APPLE/Readme.txt +_MAC_LANGUAGE_TO_SCRIPT = { + 0: 0, # langEnglish → smRoman + 1: 0, # langFrench → smRoman + 2: 0, # langGerman → smRoman + 3: 0, # langItalian → smRoman + 4: 0, # langDutch → smRoman + 5: 0, # langSwedish → smRoman + 6: 0, # langSpanish → smRoman + 7: 0, # langDanish → smRoman + 8: 0, # langPortuguese → smRoman + 9: 0, # langNorwegian → smRoman + 10: 5, # langHebrew → smHebrew + 11: 1, # langJapanese → smJapanese + 12: 4, # langArabic → smArabic + 13: 0, # langFinnish → smRoman + 14: 6, # langGreek → smGreek + 15: 0, # langIcelandic → smRoman (modified) + 16: 0, # langMaltese → smRoman + 17: 0, # langTurkish → smRoman (modified) + 18: 0, # langCroatian → smRoman (modified) + 19: 2, # langTradChinese → smTradChinese + 20: 4, # langUrdu → smArabic + 21: 9, # langHindi → smDevanagari + 22: 21, # langThai → smThai + 23: 3, # langKorean → smKorean + 24: 29, # langLithuanian → smCentralEuroRoman + 25: 29, # langPolish → smCentralEuroRoman + 26: 29, # langHungarian → smCentralEuroRoman + 27: 29, # langEstonian → smCentralEuroRoman + 28: 29, # langLatvian → smCentralEuroRoman + 29: 0, # langSami → smRoman + 30: 0, # 
langFaroese → smRoman (modified) + 31: 4, # langFarsi → smArabic (modified) + 32: 7, # langRussian → smCyrillic + 33: 25, # langSimpChinese → smSimpChinese + 34: 0, # langFlemish → smRoman + 35: 0, # langIrishGaelic → smRoman (modified) + 36: 0, # langAlbanian → smRoman + 37: 0, # langRomanian → smRoman (modified) + 38: 29, # langCzech → smCentralEuroRoman + 39: 29, # langSlovak → smCentralEuroRoman + 40: 0, # langSlovenian → smRoman (modified) + 41: 5, # langYiddish → smHebrew + 42: 7, # langSerbian → smCyrillic + 43: 7, # langMacedonian → smCyrillic + 44: 7, # langBulgarian → smCyrillic + 45: 7, # langUkrainian → smCyrillic (modified) + 46: 7, # langByelorussian → smCyrillic + 47: 7, # langUzbek → smCyrillic + 48: 7, # langKazakh → smCyrillic + 49: 7, # langAzerbaijani → smCyrillic + 50: 4, # langAzerbaijanAr → smArabic + 51: 24, # langArmenian → smArmenian + 52: 23, # langGeorgian → smGeorgian + 53: 7, # langMoldavian → smCyrillic + 54: 7, # langKirghiz → smCyrillic + 55: 7, # langTajiki → smCyrillic + 56: 7, # langTurkmen → smCyrillic + 57: 27, # langMongolian → smMongolian + 58: 7, # langMongolianCyr → smCyrillic + 59: 4, # langPashto → smArabic + 60: 4, # langKurdish → smArabic + 61: 4, # langKashmiri → smArabic + 62: 4, # langSindhi → smArabic + 63: 26, # langTibetan → smTibetan + 64: 9, # langNepali → smDevanagari + 65: 9, # langSanskrit → smDevanagari + 66: 9, # langMarathi → smDevanagari + 67: 13, # langBengali → smBengali + 68: 13, # langAssamese → smBengali + 69: 11, # langGujarati → smGujarati + 70: 10, # langPunjabi → smGurmukhi + 71: 12, # langOriya → smOriya + 72: 17, # langMalayalam → smMalayalam + 73: 16, # langKannada → smKannada + 74: 14, # langTamil → smTamil + 75: 15, # langTelugu → smTelugu + 76: 18, # langSinhalese → smSinhalese + 77: 19, # langBurmese → smBurmese + 78: 20, # langKhmer → smKhmer + 79: 22, # langLao → smLao + 80: 30, # langVietnamese → smVietnamese + 81: 0, # langIndonesian → smRoman + 82: 0, # langTagalog → smRoman + 83: 0, 
# langMalayRoman → smRoman + 84: 4, # langMalayArabic → smArabic + 85: 28, # langAmharic → smEthiopic + 86: 28, # langTigrinya → smEthiopic + 87: 28, # langOromo → smEthiopic + 88: 0, # langSomali → smRoman + 89: 0, # langSwahili → smRoman + 90: 0, # langKinyarwanda → smRoman + 91: 0, # langRundi → smRoman + 92: 0, # langNyanja → smRoman + 93: 0, # langMalagasy → smRoman + 94: 0, # langEsperanto → smRoman + 128: 0, # langWelsh → smRoman (modified) + 129: 0, # langBasque → smRoman + 130: 0, # langCatalan → smRoman + 131: 0, # langLatin → smRoman + 132: 0, # langQuechua → smRoman + 133: 0, # langGuarani → smRoman + 134: 0, # langAymara → smRoman + 135: 7, # langTatar → smCyrillic + 136: 4, # langUighur → smArabic + 137: 26, # langDzongkha → smTibetan + 138: 0, # langJavaneseRom → smRoman + 139: 0, # langSundaneseRom → smRoman + 140: 0, # langGalician → smRoman + 141: 0, # langAfrikaans → smRoman + 142: 0, # langBreton → smRoman (modified) + 143: 28, # langInuktitut → smEthiopic (modified) + 144: 0, # langScottishGaelic → smRoman (modified) + 145: 0, # langManxGaelic → smRoman (modified) + 146: 0, # langIrishGaelicScript → smRoman (modified) + 147: 0, # langTongan → smRoman + 148: 6, # langGreekAncient → smRoman + 149: 0, # langGreenlandic → smRoman + 150: 0, # langAzerbaijanRoman → smRoman + 151: 0, # langNynorsk → smRoman +} + + +class NameRecordVisitor(TTVisitor): + # Font tables that have NameIDs we need to collect. 
+ TABLES = ("GSUB", "GPOS", "fvar", "CPAL", "STAT") + + def __init__(self): + self.seen = set() + + +@NameRecordVisitor.register_attrs( + ( + (otTables.FeatureParamsSize, ("SubfamilyNameID",)), + (otTables.FeatureParamsStylisticSet, ("UINameID",)), + (otTables.STAT, ("ElidedFallbackNameID",)), + (otTables.AxisRecord, ("AxisNameID",)), + (otTables.AxisValue, ("ValueNameID",)), + (otTables.FeatureName, ("FeatureNameID",)), + (otTables.Setting, ("SettingNameID",)), + ) +) +def visit(visitor, obj, attr, value): + visitor.seen.add(value) + + +@NameRecordVisitor.register(otTables.FeatureParamsCharacterVariants) +def visit(visitor, obj): + for attr in ("FeatUILabelNameID", "FeatUITooltipTextNameID", "SampleTextNameID"): + value = getattr(obj, attr) + visitor.seen.add(value) + # also include the sequence of UI strings for individual variants, if any + if obj.FirstParamUILabelNameID == 0 or obj.NumNamedParameters == 0: + return + visitor.seen.update( + range( + obj.FirstParamUILabelNameID, + obj.FirstParamUILabelNameID + obj.NumNamedParameters, + ) + ) + + +@NameRecordVisitor.register(ttLib.getTableClass("fvar")) +def visit(visitor, obj): + for inst in obj.instances: + if inst.postscriptNameID != 0xFFFF: + visitor.seen.add(inst.postscriptNameID) + visitor.seen.add(inst.subfamilyNameID) + + for axis in obj.axes: + visitor.seen.add(axis.axisNameID) + + +@NameRecordVisitor.register(ttLib.getTableClass("CPAL")) +def visit(visitor, obj): + if obj.version == 1: + visitor.seen.update(obj.paletteLabels) + visitor.seen.update(obj.paletteEntryLabels) + + +@NameRecordVisitor.register(ttLib.TTFont) +def visit(visitor, font, *args, **kwargs): + if hasattr(visitor, "font"): + return False + + visitor.font = font + for tag in visitor.TABLES: + if tag in font: + visitor.visit(font[tag], *args, **kwargs) + del visitor.font + return False diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_o_p_b_d.py 
b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_o_p_b_d.py new file mode 100644 index 0000000000000000000000000000000000000000..b22af216bb2e2ddb8af1cd3f991d4ede69471076 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_o_p_b_d.py @@ -0,0 +1,6 @@ +from .otBase import BaseTTXConverter + + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6opbd.html +class table__o_p_b_d(BaseTTXConverter): + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_v_m_t_x.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_v_m_t_x.py new file mode 100644 index 0000000000000000000000000000000000000000..a13304c321f20dfa9044dc0fc3dd32ec57347061 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_v_m_t_x.py @@ -0,0 +1,10 @@ +from fontTools import ttLib + +superclass = ttLib.getTableClass("hmtx") + + +class table__v_m_t_x(superclass): + headerTag = "vhea" + advanceName = "height" + sideBearingName = "tsb" + numberOfMetricsName = "numberOfVMetrics" diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/grUtils.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/grUtils.py new file mode 100644 index 0000000000000000000000000000000000000000..785684b1eb30a76ae598bfe46416d4556fc422a0 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/grUtils.py @@ -0,0 +1,92 @@ +import struct, warnings + +try: + import lz4 +except ImportError: + lz4 = None +else: + import lz4.block + +# old scheme for VERSION < 0.9 otherwise use lz4.block + + +def decompress(data): + (compression,) = struct.unpack(">L", data[4:8]) + scheme = compression >> 27 + size = compression & 0x07FFFFFF + if scheme == 0: + pass + elif scheme == 1 and lz4: + res = lz4.block.decompress(struct.pack("L", (scheme << 27) + (len(data) & 0x07FFFFFF)) + if scheme == 0: + return data + elif scheme == 1 and lz4: 
+ res = lz4.block.compress( + data, mode="high_compression", compression=16, store_size=False + ) + return hdr + res + else: + warnings.warn("Table failed to compress by unsupported compression scheme") + return data + + +def _entries(attrs, sameval): + ak = 0 + vals = [] + lastv = 0 + for k, v in attrs: + if len(vals) and (k != ak + 1 or (sameval and v != lastv)): + yield (ak - len(vals) + 1, len(vals), vals) + vals = [] + ak = k + vals.append(v) + lastv = v + yield (ak - len(vals) + 1, len(vals), vals) + + +def entries(attributes, sameval=False): + g = _entries(sorted(attributes.items(), key=lambda x: int(x[0])), sameval) + return g + + +def bininfo(num, size=1): + if num == 0: + return struct.pack(">4H", 0, 0, 0, 0) + srange = 1 + select = 0 + while srange <= num: + srange *= 2 + select += 1 + select -= 1 + srange //= 2 + srange *= size + shift = num * size - srange + return struct.pack(">4H", num, srange, select, shift) + + +def num2tag(n): + if n < 0x200000: + return str(n) + else: + return ( + struct.unpack("4s", struct.pack(">L", n))[0].replace(b"\000", b"").decode() + ) + + +def tag2num(n): + try: + return int(n) + except ValueError: + n = (n + " ")[:4] + return struct.unpack(">L", n.encode("ascii"))[0] diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/otBase.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/otBase.py new file mode 100644 index 0000000000000000000000000000000000000000..8df7c236b1c163cf4487ea08a7456d804b34aabe --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/otBase.py @@ -0,0 +1,1465 @@ +from fontTools.config import OPTIONS +from fontTools.misc.textTools import Tag, bytesjoin +from .DefaultTable import DefaultTable +from enum import IntEnum +import sys +import array +import struct +import logging +from functools import lru_cache +from typing import Iterator, NamedTuple, Optional, Tuple + +log = logging.getLogger(__name__) + +have_uharfbuzz = False +try: + 
    import uharfbuzz as hb

    # repack method added in uharfbuzz >= 0.23; if uharfbuzz *can* be
    # imported but repack method is missing, behave as if uharfbuzz
    # is not available (fallback to the slower Python implementation)
    have_uharfbuzz = callable(getattr(hb, "repack", None))
except ImportError:
    pass

# Per-font config key controlling whether hb.repack is used for GSUB/GPOS.
USE_HARFBUZZ_REPACKER = OPTIONS[f"{__name__}:USE_HARFBUZZ_REPACKER"]


class OverflowErrorRecord(object):
    """Plain record describing where an offset overflow occurred."""

    def __init__(self, overflowTuple):
        # Unpacks (tableType, LookupListIndex, SubTableIndex, itemName, itemIndex).
        self.tableType = overflowTuple[0]
        self.LookupListIndex = overflowTuple[1]
        self.SubTableIndex = overflowTuple[2]
        self.itemName = overflowTuple[3]
        self.itemIndex = overflowTuple[4]

    def __repr__(self):
        return str(
            (
                self.tableType,
                "LookupIndex:",
                self.LookupListIndex,
                "SubTableIndex:",
                self.SubTableIndex,
                "ItemName:",
                self.itemName,
                "ItemIndex:",
                self.itemIndex,
            )
        )


class OTLOffsetOverflowError(Exception):
    """Raised when a subtable offset does not fit in its offset field.

    Carries an OverflowErrorRecord in ``value`` so callers can attempt
    a targeted fix and retry compilation.
    """

    def __init__(self, overflowErrorRecord):
        self.value = overflowErrorRecord

    def __str__(self):
        return repr(self.value)


class RepackerState(IntEnum):
    # Repacking control flow is implemented using a state machine. The state machine table:
    #
    # State       | Packing Success | Packing Failed | Exception Raised |
    # ------------+-----------------+----------------+------------------+
    # PURE_FT     | Return result   | PURE_FT        | Return failure   |
    # HB_FT       | Return result   | HB_FT          | FT_FALLBACK      |
    # FT_FALLBACK | HB_FT           | FT_FALLBACK    | Return failure   |

    # Pack only with fontTools, don't allow sharing between extensions.
    PURE_FT = 1

    # Attempt to pack with harfbuzz (allowing sharing between extensions)
    # use fontTools to attempt overflow resolution.
    HB_FT = 2

    # Fallback if HB/FT packing gets stuck. Pack only with fontTools, don't allow sharing between
    # extensions.
    FT_FALLBACK = 3


class BaseTTXConverter(DefaultTable):
    """Generic base class for TTX table converters.
It functions as an + adapter between the TTX (ttLib actually) table model and the model + we use for OpenType tables, which is necessarily subtly different. + """ + + def decompile(self, data, font): + """Create an object from the binary data. Called automatically on access.""" + from . import otTables + + reader = OTTableReader(data, tableTag=self.tableTag) + tableClass = getattr(otTables, self.tableTag) + self.table = tableClass() + self.table.decompile(reader, font) + + def compile(self, font): + """Compiles the table into binary. Called automatically on save.""" + + # General outline: + # Create a top-level OTTableWriter for the GPOS/GSUB table. + # Call the compile method for the the table + # for each 'converter' record in the table converter list + # call converter's write method for each item in the value. + # - For simple items, the write method adds a string to the + # writer's self.items list. + # - For Struct/Table/Subtable items, it add first adds new writer to the + # to the writer's self.items, then calls the item's compile method. + # This creates a tree of writers, rooted at the GUSB/GPOS writer, with + # each writer representing a table, and the writer.items list containing + # the child data strings and writers. + # call the getAllData method + # call _doneWriting, which removes duplicates + # call _gatherTables. This traverses the tables, adding unique occurences to a flat list of tables + # Traverse the flat list of tables, calling getDataLength on each to update their position + # Traverse the flat list of tables again, calling getData each get the data in the table, now that + # pos's and offset are known. + + # If a lookup subtable overflows an offset, we have to start all over. + overflowRecord = None + # this is 3-state option: default (None) means automatically use hb.repack or + # silently fall back if it fails; True, use it and raise error if not possible + # or it errors out; False, don't use it, even if you can. 
+ use_hb_repack = font.cfg[USE_HARFBUZZ_REPACKER] + if self.tableTag in ("GSUB", "GPOS"): + if use_hb_repack is False: + log.debug( + "hb.repack disabled, compiling '%s' with pure-python serializer", + self.tableTag, + ) + elif not have_uharfbuzz: + if use_hb_repack is True: + raise ImportError("No module named 'uharfbuzz'") + else: + assert use_hb_repack is None + log.debug( + "uharfbuzz not found, compiling '%s' with pure-python serializer", + self.tableTag, + ) + + if ( + use_hb_repack in (None, True) + and have_uharfbuzz + and self.tableTag in ("GSUB", "GPOS") + ): + state = RepackerState.HB_FT + else: + state = RepackerState.PURE_FT + + hb_first_error_logged = False + lastOverflowRecord = None + while True: + try: + writer = OTTableWriter(tableTag=self.tableTag) + self.table.compile(writer, font) + if state == RepackerState.HB_FT: + return self.tryPackingHarfbuzz(writer, hb_first_error_logged) + elif state == RepackerState.PURE_FT: + return self.tryPackingFontTools(writer) + elif state == RepackerState.FT_FALLBACK: + # Run packing with FontTools only, but don't return the result as it will + # not be optimally packed. Once a successful packing has been found, state is + # changed back to harfbuzz packing to produce the final, optimal, packing. + self.tryPackingFontTools(writer) + log.debug( + "Re-enabling sharing between extensions and switching back to " + "harfbuzz+fontTools packing." + ) + state = RepackerState.HB_FT + + except OTLOffsetOverflowError as e: + hb_first_error_logged = True + ok = self.tryResolveOverflow(font, e, lastOverflowRecord) + lastOverflowRecord = e.value + + if ok: + continue + + if state is RepackerState.HB_FT: + log.debug( + "Harfbuzz packing out of resolutions, disabling sharing between extensions and " + "switching to fontTools only packing." 
                    )
                    state = RepackerState.FT_FALLBACK
                else:
                    raise

    def tryPackingHarfbuzz(self, writer, hb_first_error_logged):
        """Serialize via hb.repack; on failure fall back to the pure-Python
        serializer *without* duplicate removal, so the offset-overflow
        resolution machinery in compile() can take over."""
        try:
            log.debug("serializing '%s' with hb.repack", self.tableTag)
            return writer.getAllDataUsingHarfbuzz(self.tableTag)
        except (ValueError, MemoryError, hb.RepackerError) as e:
            # Only log hb repacker errors the first time they occur in
            # the offset-overflow resolution loop, they are just noisy.
            # Maybe we can revisit this if/when uharfbuzz actually gives
            # us more info as to why hb.repack failed...
            if not hb_first_error_logged:
                error_msg = f"{type(e).__name__}"
                if str(e) != "":
                    error_msg += f": {e}"
                log.warning(
                    "hb.repack failed to serialize '%s', attempting fonttools resolutions "
                    "; the error message was: %s",
                    self.tableTag,
                    error_msg,
                )
                hb_first_error_logged = True
            return writer.getAllData(remove_duplicate=False)

    def tryPackingFontTools(self, writer):
        """Serialize with the pure-Python packer (with duplicate removal)."""
        return writer.getAllData()

    def tryResolveOverflow(self, font, e, lastOverflowRecord):
        """Attempt to fix an OTLOffsetOverflowError; return truthy on success.

        Gives up immediately when the same record overflows twice in a row
        (no progress was made by the previous fix).
        """
        ok = 0
        if lastOverflowRecord == e.value:
            # Oh well...
            return ok

        overflowRecord = e.value
        log.info("Attempting to fix OTLOffsetOverflowError %s", e)

        if overflowRecord.itemName is None:
            from .otTables import fixLookupOverFlows

            ok = fixLookupOverFlows(font, overflowRecord)
        else:
            from .otTables import fixSubTableOverFlows

            ok = fixSubTableOverFlows(font, overflowRecord)

        if ok:
            return ok

        # Try upgrading lookup to Extension and hope
        # that cross-lookup sharing not happening would
        # fix overflow...
        from .otTables import fixLookupOverFlows

        return fixLookupOverFlows(font, overflowRecord)

    def toXML(self, writer, font):
        # Top-level tables write only their children; the ttx wrapper tag is
        # emitted by the caller.
        self.table.toXML2(writer, font)

    def fromXML(self, name, attrs, content, font):
        from .
import otTables + + if not hasattr(self, "table"): + tableClass = getattr(otTables, self.tableTag) + self.table = tableClass() + self.table.fromXML(name, attrs, content, font) + self.table.populateDefaults() + + def ensureDecompiled(self, recurse=True): + self.table.ensureDecompiled(recurse=recurse) + + +# https://github.com/fonttools/fonttools/pull/2285#issuecomment-834652928 +assert len(struct.pack("i", 0)) == 4 +assert array.array("i").itemsize == 4, "Oops, file a bug against fonttools." + + +class OTTableReader(object): + """Helper class to retrieve data from an OpenType table.""" + + __slots__ = ("data", "offset", "pos", "localState", "tableTag") + + def __init__(self, data, localState=None, offset=0, tableTag=None): + self.data = data + self.offset = offset + self.pos = offset + self.localState = localState + self.tableTag = tableTag + + def advance(self, count): + self.pos += count + + def seek(self, pos): + self.pos = pos + + def copy(self): + other = self.__class__(self.data, self.localState, self.offset, self.tableTag) + other.pos = self.pos + return other + + def getSubReader(self, offset): + offset = self.offset + offset + return self.__class__(self.data, self.localState, offset, self.tableTag) + + def readValue(self, typecode, staticSize): + pos = self.pos + newpos = pos + staticSize + (value,) = struct.unpack(f">{typecode}", self.data[pos:newpos]) + self.pos = newpos + return value + + def readArray(self, typecode, staticSize, count): + pos = self.pos + newpos = pos + count * staticSize + value = array.array(typecode, self.data[pos:newpos]) + if sys.byteorder != "big": + value.byteswap() + self.pos = newpos + return value.tolist() + + def readInt8(self): + return self.readValue("b", staticSize=1) + + def readInt8Array(self, count): + return self.readArray("b", staticSize=1, count=count) + + def readShort(self): + return self.readValue("h", staticSize=2) + + def readShortArray(self, count): + return self.readArray("h", staticSize=2, count=count) + + 
    def readLong(self):
        return self.readValue("i", staticSize=4)

    def readLongArray(self, count):
        return self.readArray("i", staticSize=4, count=count)

    def readUInt8(self):
        return self.readValue("B", staticSize=1)

    def readUInt8Array(self, count):
        return self.readArray("B", staticSize=1, count=count)

    def readUShort(self):
        return self.readValue("H", staticSize=2)

    def readUShortArray(self, count):
        return self.readArray("H", staticSize=2, count=count)

    def readULong(self):
        return self.readValue("I", staticSize=4)

    def readULongArray(self, count):
        return self.readArray("I", staticSize=4, count=count)

    def readUInt24(self):
        # struct has no 3-byte code: left-pad with a zero byte and unpack
        # as a 4-byte big-endian value.
        pos = self.pos
        newpos = pos + 3
        (value,) = struct.unpack(">l", b"\0" + self.data[pos:newpos])
        self.pos = newpos
        return value

    def readUInt24Array(self, count):
        return [self.readUInt24() for _ in range(count)]

    def readTag(self):
        pos = self.pos
        newpos = pos + 4
        value = Tag(self.data[pos:newpos])
        assert len(value) == 4, value
        self.pos = newpos
        return value

    def readData(self, count):
        # Return `count` raw bytes, advancing the read position.
        pos = self.pos
        newpos = pos + count
        value = self.data[pos:newpos]
        self.pos = newpos
        return value

    def __setitem__(self, name, value):
        # Copy-on-write: sibling readers sharing localState are unaffected.
        state = self.localState.copy() if self.localState else dict()
        state[name] = value
        self.localState = state

    def __getitem__(self, name):
        return self.localState and self.localState[name]

    def __contains__(self, name):
        return self.localState and name in self.localState


class OffsetToWriter(object):
    # Pairs a subtable writer with the size (2, 3 or 4 bytes) of the offset
    # field that points at it; used as an item inside OTTableWriter.items.
    def __init__(self, subWriter, offsetSize):
        self.subWriter = subWriter
        self.offsetSize = offsetSize

    def __eq__(self, other):
        if type(self) != type(other):
            return NotImplemented
        return self.subWriter == other.subWriter and self.offsetSize == other.offsetSize

    def __hash__(self):
        # only works after self._doneWriting() has been called
        return hash((self.subWriter, self.offsetSize))


class OTTableWriter(object):
    """Helper
class to gather and assemble data for OpenType tables.""" + + def __init__(self, localState=None, tableTag=None): + self.items = [] + self.pos = None + self.localState = localState + self.tableTag = tableTag + self.parent = None + + def __setitem__(self, name, value): + state = self.localState.copy() if self.localState else dict() + state[name] = value + self.localState = state + + def __getitem__(self, name): + return self.localState[name] + + def __delitem__(self, name): + del self.localState[name] + + # assembler interface + + def getDataLength(self): + """Return the length of this table in bytes, without subtables.""" + l = 0 + for item in self.items: + if hasattr(item, "getCountData"): + l += item.size + elif hasattr(item, "subWriter"): + l += item.offsetSize + else: + l = l + len(item) + return l + + def getData(self): + """Assemble the data for this writer/table, without subtables.""" + items = list(self.items) # make a shallow copy + pos = self.pos + numItems = len(items) + for i in range(numItems): + item = items[i] + + if hasattr(item, "subWriter"): + if item.offsetSize == 4: + items[i] = packULong(item.subWriter.pos - pos) + elif item.offsetSize == 2: + try: + items[i] = packUShort(item.subWriter.pos - pos) + except struct.error: + # provide data to fix overflow problem. 
+ overflowErrorRecord = self.getOverflowErrorRecord( + item.subWriter + ) + + raise OTLOffsetOverflowError(overflowErrorRecord) + elif item.offsetSize == 3: + items[i] = packUInt24(item.subWriter.pos - pos) + else: + raise ValueError(item.offsetSize) + + return bytesjoin(items) + + def getDataForHarfbuzz(self): + """Assemble the data for this writer/table with all offset field set to 0""" + items = list(self.items) + packFuncs = {2: packUShort, 3: packUInt24, 4: packULong} + for i, item in enumerate(items): + if hasattr(item, "subWriter"): + # Offset value is not needed in harfbuzz repacker, so setting offset to 0 to avoid overflow here + if item.offsetSize in packFuncs: + items[i] = packFuncs[item.offsetSize](0) + else: + raise ValueError(item.offsetSize) + + return bytesjoin(items) + + def __hash__(self): + # only works after self._doneWriting() has been called + return hash(self.items) + + def __ne__(self, other): + result = self.__eq__(other) + return result if result is NotImplemented else not result + + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self.items == other.items + + def _doneWriting(self, internedTables, shareExtension=False): + # Convert CountData references to data string items + # collapse duplicate table references to a unique entry + # "tables" are OTTableWriter objects. + + # For Extension Lookup types, we can + # eliminate duplicates only within the tree under the Extension Lookup, + # as offsets may exceed 64K even between Extension LookupTable subtables. + isExtension = hasattr(self, "Extension") + + # Certain versions of Uniscribe reject the font if the GSUB/GPOS top-level + # arrays (ScriptList, FeatureList, LookupList) point to the same, possibly + # empty, array. So, we don't share those. 
+ # See: https://github.com/fonttools/fonttools/issues/518 + dontShare = hasattr(self, "DontShare") + + if isExtension and not shareExtension: + internedTables = {} + + items = self.items + for i in range(len(items)): + item = items[i] + if hasattr(item, "getCountData"): + items[i] = item.getCountData() + elif hasattr(item, "subWriter"): + item.subWriter._doneWriting( + internedTables, shareExtension=shareExtension + ) + # At this point, all subwriters are hashable based on their items. + # (See hash and comparison magic methods above.) So the ``setdefault`` + # call here will return the first writer object we've seen with + # equal content, or store it in the dictionary if it's not been + # seen yet. We therefore replace the subwriter object with an equivalent + # object, which deduplicates the tree. + if not dontShare: + items[i].subWriter = internedTables.setdefault( + item.subWriter, item.subWriter + ) + self.items = tuple(items) + + def _gatherTables(self, tables, extTables, done): + # Convert table references in self.items tree to a flat + # list of tables in depth-first traversal order. + # "tables" are OTTableWriter objects. + # We do the traversal in reverse order at each level, in order to + # resolve duplicate references to be the last reference in the list of tables. + # For extension lookups, duplicate references can be merged only within the + # writer tree under the extension lookup. + + done[id(self)] = True + + numItems = len(self.items) + iRange = list(range(numItems)) + iRange.reverse() + + isExtension = hasattr(self, "Extension") + + selfTables = tables + + if isExtension: + assert ( + extTables is not None + ), "Program or XML editing error. Extension subtables cannot contain extensions subtables" + tables, extTables, done = extTables, None, {} + + # add Coverage table if it is sorted last. 
+ sortCoverageLast = False + if hasattr(self, "sortCoverageLast"): + # Find coverage table + for i in range(numItems): + item = self.items[i] + if ( + hasattr(item, "subWriter") + and getattr(item.subWriter, "name", None) == "Coverage" + ): + sortCoverageLast = True + break + if id(item.subWriter) not in done: + item.subWriter._gatherTables(tables, extTables, done) + else: + # We're a new parent of item + pass + + for i in iRange: + item = self.items[i] + if not hasattr(item, "subWriter"): + continue + + if ( + sortCoverageLast + and (i == 1) + and getattr(item.subWriter, "name", None) == "Coverage" + ): + # we've already 'gathered' it above + continue + + if id(item.subWriter) not in done: + item.subWriter._gatherTables(tables, extTables, done) + else: + # Item is already written out by other parent + pass + + selfTables.append(self) + + def _gatherGraphForHarfbuzz(self, tables, obj_list, done, objidx, virtual_edges): + real_links = [] + virtual_links = [] + item_idx = objidx + + # Merge virtual_links from parent + for idx in virtual_edges: + virtual_links.append((0, 0, idx)) + + sortCoverageLast = False + coverage_idx = 0 + if hasattr(self, "sortCoverageLast"): + # Find coverage table + for i, item in enumerate(self.items): + if getattr(item, "name", None) == "Coverage": + sortCoverageLast = True + if id(item) not in done: + coverage_idx = item_idx = item._gatherGraphForHarfbuzz( + tables, obj_list, done, item_idx, virtual_edges + ) + else: + coverage_idx = done[id(item)] + virtual_edges.append(coverage_idx) + break + + child_idx = 0 + offset_pos = 0 + for i, item in enumerate(self.items): + if hasattr(item, "subWriter"): + pos = offset_pos + elif hasattr(item, "getCountData"): + offset_pos += item.size + continue + else: + offset_pos = offset_pos + len(item) + continue + + if id(item.subWriter) not in done: + child_idx = item_idx = item.subWriter._gatherGraphForHarfbuzz( + tables, obj_list, done, item_idx, virtual_edges + ) + else: + child_idx = 
done[id(item.subWriter)] + + real_edge = (pos, item.offsetSize, child_idx) + real_links.append(real_edge) + offset_pos += item.offsetSize + + tables.append(self) + obj_list.append((real_links, virtual_links)) + item_idx += 1 + done[id(self)] = item_idx + if sortCoverageLast: + virtual_edges.pop() + + return item_idx + + def getAllDataUsingHarfbuzz(self, tableTag): + """The Whole table is represented as a Graph. + Assemble graph data and call Harfbuzz repacker to pack the table. + Harfbuzz repacker is faster and retain as much sub-table sharing as possible, see also: + https://github.com/harfbuzz/harfbuzz/blob/main/docs/repacker.md + The input format for hb.repack() method is explained here: + https://github.com/harfbuzz/uharfbuzz/blob/main/src/uharfbuzz/_harfbuzz.pyx#L1149 + """ + internedTables = {} + self._doneWriting(internedTables, shareExtension=True) + tables = [] + obj_list = [] + done = {} + objidx = 0 + virtual_edges = [] + self._gatherGraphForHarfbuzz(tables, obj_list, done, objidx, virtual_edges) + # Gather all data in two passes: the absolute positions of all + # subtable are needed before the actual data can be assembled. + pos = 0 + for table in tables: + table.pos = pos + pos = pos + table.getDataLength() + + data = [] + for table in tables: + tableData = table.getDataForHarfbuzz() + data.append(tableData) + + if hasattr(hb, "repack_with_tag"): + return hb.repack_with_tag(str(tableTag), data, obj_list) + else: + return hb.repack(data, obj_list) + + def getAllData(self, remove_duplicate=True): + """Assemble all data, including all subtables.""" + if remove_duplicate: + internedTables = {} + self._doneWriting(internedTables) + tables = [] + extTables = [] + done = {} + self._gatherTables(tables, extTables, done) + tables.reverse() + extTables.reverse() + # Gather all data in two passes: the absolute positions of all + # subtable are needed before the actual data can be assembled. 
+ pos = 0 + for table in tables: + table.pos = pos + pos = pos + table.getDataLength() + + for table in extTables: + table.pos = pos + pos = pos + table.getDataLength() + + data = [] + for table in tables: + tableData = table.getData() + data.append(tableData) + + for table in extTables: + tableData = table.getData() + data.append(tableData) + + return bytesjoin(data) + + # interface for gathering data, as used by table.compile() + + def getSubWriter(self): + subwriter = self.__class__(self.localState, self.tableTag) + subwriter.parent = ( + self # because some subtables have idential values, we discard + ) + # the duplicates under the getAllData method. Hence some + # subtable writers can have more than one parent writer. + # But we just care about first one right now. + return subwriter + + def writeValue(self, typecode, value): + self.items.append(struct.pack(f">{typecode}", value)) + + def writeArray(self, typecode, values): + a = array.array(typecode, values) + if sys.byteorder != "big": + a.byteswap() + self.items.append(a.tobytes()) + + def writeInt8(self, value): + assert -128 <= value < 128, value + self.items.append(struct.pack(">b", value)) + + def writeInt8Array(self, values): + self.writeArray("b", values) + + def writeShort(self, value): + assert -32768 <= value < 32768, value + self.items.append(struct.pack(">h", value)) + + def writeShortArray(self, values): + self.writeArray("h", values) + + def writeLong(self, value): + self.items.append(struct.pack(">i", value)) + + def writeLongArray(self, values): + self.writeArray("i", values) + + def writeUInt8(self, value): + assert 0 <= value < 256, value + self.items.append(struct.pack(">B", value)) + + def writeUInt8Array(self, values): + self.writeArray("B", values) + + def writeUShort(self, value): + assert 0 <= value < 0x10000, value + self.items.append(struct.pack(">H", value)) + + def writeUShortArray(self, values): + self.writeArray("H", values) + + def writeULong(self, value): + 
self.items.append(struct.pack(">I", value)) + + def writeULongArray(self, values): + self.writeArray("I", values) + + def writeUInt24(self, value): + assert 0 <= value < 0x1000000, value + b = struct.pack(">L", value) + self.items.append(b[1:]) + + def writeUInt24Array(self, values): + for value in values: + self.writeUInt24(value) + + def writeTag(self, tag): + tag = Tag(tag).tobytes() + assert len(tag) == 4, tag + self.items.append(tag) + + def writeSubTable(self, subWriter, offsetSize): + self.items.append(OffsetToWriter(subWriter, offsetSize)) + + def writeCountReference(self, table, name, size=2, value=None): + ref = CountReference(table, name, size=size, value=value) + self.items.append(ref) + return ref + + def writeStruct(self, format, values): + data = struct.pack(*(format,) + values) + self.items.append(data) + + def writeData(self, data): + self.items.append(data) + + def getOverflowErrorRecord(self, item): + LookupListIndex = SubTableIndex = itemName = itemIndex = None + if self.name == "LookupList": + LookupListIndex = item.repeatIndex + elif self.name == "Lookup": + LookupListIndex = self.repeatIndex + SubTableIndex = item.repeatIndex + else: + itemName = getattr(item, "name", "") + if hasattr(item, "repeatIndex"): + itemIndex = item.repeatIndex + if self.name == "SubTable": + LookupListIndex = self.parent.repeatIndex + SubTableIndex = self.repeatIndex + elif self.name == "ExtSubTable": + LookupListIndex = self.parent.parent.repeatIndex + SubTableIndex = self.parent.repeatIndex + else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable. 
+ itemName = ".".join([self.name, itemName]) + p1 = self.parent + while p1 and p1.name not in ["ExtSubTable", "SubTable"]: + itemName = ".".join([p1.name, itemName]) + p1 = p1.parent + if p1: + if p1.name == "ExtSubTable": + LookupListIndex = p1.parent.parent.repeatIndex + SubTableIndex = p1.parent.repeatIndex + else: + LookupListIndex = p1.parent.repeatIndex + SubTableIndex = p1.repeatIndex + + return OverflowErrorRecord( + (self.tableTag, LookupListIndex, SubTableIndex, itemName, itemIndex) + ) + + +class CountReference(object): + """A reference to a Count value, not a count of references.""" + + def __init__(self, table, name, size=None, value=None): + self.table = table + self.name = name + self.size = size + if value is not None: + self.setValue(value) + + def setValue(self, value): + table = self.table + name = self.name + if table[name] is None: + table[name] = value + else: + assert table[name] == value, (name, table[name], value) + + def getValue(self): + return self.table[self.name] + + def getCountData(self): + v = self.table[self.name] + if v is None: + v = 0 + return {1: packUInt8, 2: packUShort, 4: packULong}[self.size](v) + + +def packUInt8(value): + return struct.pack(">B", value) + + +def packUShort(value): + return struct.pack(">H", value) + + +def packULong(value): + assert 0 <= value < 0x100000000, value + return struct.pack(">I", value) + + +def packUInt24(value): + assert 0 <= value < 0x1000000, value + return struct.pack(">I", value)[1:] + + +class BaseTable(object): + """Generic base class for all OpenType (sub)tables.""" + + def __getattr__(self, attr): + reader = self.__dict__.get("reader") + if reader: + del self.reader + font = self.font + del self.font + self.decompile(reader, font) + return getattr(self, attr) + + raise AttributeError(attr) + + def ensureDecompiled(self, recurse=False): + reader = self.__dict__.get("reader") + if reader: + del self.reader + font = self.font + del self.font + self.decompile(reader, font) + if recurse: + 
for subtable in self.iterSubTables(): + subtable.value.ensureDecompiled(recurse) + + def __getstate__(self): + # before copying/pickling 'lazy' objects, make a shallow copy of OTTableReader + # https://github.com/fonttools/fonttools/issues/2965 + if "reader" in self.__dict__: + state = self.__dict__.copy() + state["reader"] = self.__dict__["reader"].copy() + return state + return self.__dict__ + + @classmethod + def getRecordSize(cls, reader): + totalSize = 0 + for conv in cls.converters: + size = conv.getRecordSize(reader) + if size is NotImplemented: + return NotImplemented + countValue = 1 + if conv.repeat: + if conv.repeat in reader: + countValue = reader[conv.repeat] + conv.aux + else: + return NotImplemented + totalSize += size * countValue + return totalSize + + def getConverters(self): + return self.converters + + def getConverterByName(self, name): + return self.convertersByName[name] + + def populateDefaults(self, propagator=None): + for conv in self.getConverters(): + if conv.repeat: + if not hasattr(self, conv.name): + setattr(self, conv.name, []) + countValue = len(getattr(self, conv.name)) - conv.aux + try: + count_conv = self.getConverterByName(conv.repeat) + setattr(self, conv.repeat, countValue) + except KeyError: + # conv.repeat is a propagated count + if propagator and conv.repeat in propagator: + propagator[conv.repeat].setValue(countValue) + else: + if conv.aux and not eval(conv.aux, None, self.__dict__): + continue + if hasattr(self, conv.name): + continue # Warn if it should NOT be present?! + if hasattr(conv, "writeNullOffset"): + setattr(self, conv.name, None) # Warn? + # elif not conv.isCount: + # # Warn? + # pass + if hasattr(conv, "DEFAULT"): + # OptionalValue converters (e.g. 
VarIndex) + setattr(self, conv.name, conv.DEFAULT) + + def decompile(self, reader, font): + self.readFormat(reader) + table = {} + self.__rawTable = table # for debugging + for conv in self.getConverters(): + if conv.name == "SubTable": + conv = conv.getConverter(reader.tableTag, table["LookupType"]) + if conv.name == "ExtSubTable": + conv = conv.getConverter(reader.tableTag, table["ExtensionLookupType"]) + if conv.name == "FeatureParams": + conv = conv.getConverter(reader["FeatureTag"]) + if conv.name == "SubStruct": + conv = conv.getConverter(reader.tableTag, table["MorphType"]) + try: + if conv.repeat: + if isinstance(conv.repeat, int): + countValue = conv.repeat + elif conv.repeat in table: + countValue = table[conv.repeat] + else: + # conv.repeat is a propagated count + countValue = reader[conv.repeat] + countValue += conv.aux + table[conv.name] = conv.readArray(reader, font, table, countValue) + else: + if conv.aux and not eval(conv.aux, None, table): + continue + table[conv.name] = conv.read(reader, font, table) + if conv.isPropagated: + reader[conv.name] = table[conv.name] + except Exception as e: + name = conv.name + e.args = e.args + (name,) + raise + + if hasattr(self, "postRead"): + self.postRead(table, font) + else: + self.__dict__.update(table) + + del self.__rawTable # succeeded, get rid of debugging info + + def compile(self, writer, font): + self.ensureDecompiled() + # TODO Following hack to be removed by rewriting how FormatSwitching tables + # are handled. 
+ # https://github.com/fonttools/fonttools/pull/2238#issuecomment-805192631 + if hasattr(self, "preWrite"): + deleteFormat = not hasattr(self, "Format") + table = self.preWrite(font) + deleteFormat = deleteFormat and hasattr(self, "Format") + else: + deleteFormat = False + table = self.__dict__.copy() + + # some count references may have been initialized in a custom preWrite; we set + # these in the writer's state beforehand (instead of sequentially) so they will + # be propagated to all nested subtables even if the count appears in the current + # table only *after* the offset to the subtable that it is counting. + for conv in self.getConverters(): + if conv.isCount and conv.isPropagated: + value = table.get(conv.name) + if isinstance(value, CountReference): + writer[conv.name] = value + + if hasattr(self, "sortCoverageLast"): + writer.sortCoverageLast = 1 + + if hasattr(self, "DontShare"): + writer.DontShare = True + + if hasattr(self.__class__, "LookupType"): + writer["LookupType"].setValue(self.__class__.LookupType) + + self.writeFormat(writer) + for conv in self.getConverters(): + value = table.get( + conv.name + ) # TODO Handle defaults instead of defaulting to None! + if conv.repeat: + if value is None: + value = [] + countValue = len(value) - conv.aux + if isinstance(conv.repeat, int): + assert len(value) == conv.repeat, "expected %d values, got %d" % ( + conv.repeat, + len(value), + ) + elif conv.repeat in table: + CountReference(table, conv.repeat, value=countValue) + else: + # conv.repeat is a propagated count + writer[conv.repeat].setValue(countValue) + try: + conv.writeArray(writer, font, table, value) + except Exception as e: + e.args = e.args + (conv.name + "[]",) + raise + elif conv.isCount: + # Special-case Count values. + # Assumption: a Count field will *always* precede + # the actual array(s). + # We need a default value, as it may be set later by a nested + # table. We will later store it here. 
+ # We add a reference: by the time the data is assembled + # the Count value will be filled in. + # We ignore the current count value since it will be recomputed, + # unless it's a CountReference that was already initialized in a custom preWrite. + if isinstance(value, CountReference): + ref = value + ref.size = conv.staticSize + writer.writeData(ref) + table[conv.name] = ref.getValue() + else: + ref = writer.writeCountReference(table, conv.name, conv.staticSize) + table[conv.name] = None + if conv.isPropagated: + writer[conv.name] = ref + elif conv.isLookupType: + # We make sure that subtables have the same lookup type, + # and that the type is the same as the one set on the + # Lookup object, if any is set. + if conv.name not in table: + table[conv.name] = None + ref = writer.writeCountReference( + table, conv.name, conv.staticSize, table[conv.name] + ) + writer["LookupType"] = ref + else: + if conv.aux and not eval(conv.aux, None, table): + continue + try: + conv.write(writer, font, table, value) + except Exception as e: + name = value.__class__.__name__ if value is not None else conv.name + e.args = e.args + (name,) + raise + if conv.isPropagated: + writer[conv.name] = value + + if deleteFormat: + del self.Format + + def readFormat(self, reader): + pass + + def writeFormat(self, writer): + pass + + def toXML(self, xmlWriter, font, attrs=None, name=None): + tableName = name if name else self.__class__.__name__ + if attrs is None: + attrs = [] + if hasattr(self, "Format"): + attrs = attrs + [("Format", self.Format)] + xmlWriter.begintag(tableName, attrs) + xmlWriter.newline() + self.toXML2(xmlWriter, font) + xmlWriter.endtag(tableName) + xmlWriter.newline() + + def toXML2(self, xmlWriter, font): + # Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB). + # This is because in TTX our parent writes our main tag, and in otBase.py we + # do it ourselves. I think I'm getting schizophrenic... 
+ for conv in self.getConverters(): + if conv.repeat: + value = getattr(self, conv.name, []) + for i in range(len(value)): + item = value[i] + conv.xmlWrite(xmlWriter, font, item, conv.name, [("index", i)]) + else: + if conv.aux and not eval(conv.aux, None, vars(self)): + continue + value = getattr( + self, conv.name, None + ) # TODO Handle defaults instead of defaulting to None! + conv.xmlWrite(xmlWriter, font, value, conv.name, []) + + def fromXML(self, name, attrs, content, font): + try: + conv = self.getConverterByName(name) + except KeyError: + raise # XXX on KeyError, raise nice error + value = conv.xmlRead(attrs, content, font) + # Some manually-written tables have a conv.repeat of "" + # to represent lists. Hence comparing to None here to + # allow those lists to be read correctly from XML. + if conv.repeat is not None: + seq = getattr(self, conv.name, None) + if seq is None: + seq = [] + setattr(self, conv.name, seq) + seq.append(value) + else: + setattr(self, conv.name, value) + + def __ne__(self, other): + result = self.__eq__(other) + return result if result is NotImplemented else not result + + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + + self.ensureDecompiled() + other.ensureDecompiled() + + return self.__dict__ == other.__dict__ + + class SubTableEntry(NamedTuple): + """See BaseTable.iterSubTables()""" + + name: str + value: "BaseTable" + index: Optional[int] = None # index into given array, None for single values + + def iterSubTables(self) -> Iterator[SubTableEntry]: + """Yield (name, value, index) namedtuples for all subtables of current table. + + A sub-table is an instance of BaseTable (or subclass thereof) that is a child + of self, the current parent table. + The tuples also contain the attribute name (str) of the of parent table to get + a subtable, and optionally, for lists of subtables (i.e. 
attributes associated + with a converter that has a 'repeat'), an index into the list containing the + given subtable value. + This method can be useful to traverse trees of otTables. + """ + for conv in self.getConverters(): + name = conv.name + value = getattr(self, name, None) + if value is None: + continue + if isinstance(value, BaseTable): + yield self.SubTableEntry(name, value) + elif isinstance(value, list): + yield from ( + self.SubTableEntry(name, v, index=i) + for i, v in enumerate(value) + if isinstance(v, BaseTable) + ) + + # instance (not @class)method for consistency with FormatSwitchingBaseTable + def getVariableAttrs(self): + return getVariableAttrs(self.__class__) + + +class FormatSwitchingBaseTable(BaseTable): + """Minor specialization of BaseTable, for tables that have multiple + formats, eg. CoverageFormat1 vs. CoverageFormat2.""" + + @classmethod + def getRecordSize(cls, reader): + return NotImplemented + + def getConverters(self): + try: + fmt = self.Format + except AttributeError: + # some FormatSwitchingBaseTables (e.g. Coverage) no longer have 'Format' + # attribute after fully decompiled, only gain one in preWrite before being + # recompiled. In the decompiled state, these hand-coded classes defined in + # otTables.py lose their format-specific nature and gain more high-level + # attributes that are not tied to converters. 
+ return [] + return self.converters.get(self.Format, []) + + def getConverterByName(self, name): + return self.convertersByName[self.Format][name] + + def readFormat(self, reader): + self.Format = reader.readUShort() + + def writeFormat(self, writer): + writer.writeUShort(self.Format) + + def toXML(self, xmlWriter, font, attrs=None, name=None): + BaseTable.toXML(self, xmlWriter, font, attrs, name) + + def getVariableAttrs(self): + return getVariableAttrs(self.__class__, self.Format) + + +class UInt8FormatSwitchingBaseTable(FormatSwitchingBaseTable): + def readFormat(self, reader): + self.Format = reader.readUInt8() + + def writeFormat(self, writer): + writer.writeUInt8(self.Format) + + +formatSwitchingBaseTables = { + "uint16": FormatSwitchingBaseTable, + "uint8": UInt8FormatSwitchingBaseTable, +} + + +def getFormatSwitchingBaseTableClass(formatType): + try: + return formatSwitchingBaseTables[formatType] + except KeyError: + raise TypeError(f"Unsupported format type: {formatType!r}") + + +# memoize since these are parsed from otData.py, thus stay constant +@lru_cache() +def getVariableAttrs(cls: BaseTable, fmt: Optional[int] = None) -> Tuple[str]: + """Return sequence of variable table field names (can be empty). + + Attributes are deemed "variable" when their otData.py's description contain + 'VarIndexBase + {offset}', e.g. COLRv1 PaintVar* tables. 
+ """ + if not issubclass(cls, BaseTable): + raise TypeError(cls) + if issubclass(cls, FormatSwitchingBaseTable): + if fmt is None: + raise TypeError(f"'fmt' is required for format-switching {cls.__name__}") + converters = cls.convertersByName[fmt] + else: + converters = cls.convertersByName + # assume if no 'VarIndexBase' field is present, table has no variable fields + if "VarIndexBase" not in converters: + return () + varAttrs = {} + for name, conv in converters.items(): + offset = conv.getVarIndexOffset() + if offset is not None: + varAttrs[name] = offset + return tuple(sorted(varAttrs, key=varAttrs.__getitem__)) + + +# +# Support for ValueRecords +# +# This data type is so different from all other OpenType data types that +# it requires quite a bit of code for itself. It even has special support +# in OTTableReader and OTTableWriter... +# + +valueRecordFormat = [ + # Mask Name isDevice signed + (0x0001, "XPlacement", 0, 1), + (0x0002, "YPlacement", 0, 1), + (0x0004, "XAdvance", 0, 1), + (0x0008, "YAdvance", 0, 1), + (0x0010, "XPlaDevice", 1, 0), + (0x0020, "YPlaDevice", 1, 0), + (0x0040, "XAdvDevice", 1, 0), + (0x0080, "YAdvDevice", 1, 0), + # reserved: + (0x0100, "Reserved1", 0, 0), + (0x0200, "Reserved2", 0, 0), + (0x0400, "Reserved3", 0, 0), + (0x0800, "Reserved4", 0, 0), + (0x1000, "Reserved5", 0, 0), + (0x2000, "Reserved6", 0, 0), + (0x4000, "Reserved7", 0, 0), + (0x8000, "Reserved8", 0, 0), +] + + +def _buildDict(): + d = {} + for mask, name, isDevice, signed in valueRecordFormat: + d[name] = mask, isDevice, signed + return d + + +valueRecordFormatDict = _buildDict() + + +class ValueRecordFactory(object): + """Given a format code, this object convert ValueRecords.""" + + def __init__(self, valueFormat): + format = [] + for mask, name, isDevice, signed in valueRecordFormat: + if valueFormat & mask: + format.append((name, isDevice, signed)) + self.format = format + + def __len__(self): + return len(self.format) + + def readValueRecord(self, reader, font): 
+ format = self.format + if not format: + return None + valueRecord = ValueRecord() + for name, isDevice, signed in format: + if signed: + value = reader.readShort() + else: + value = reader.readUShort() + if isDevice: + if value: + from . import otTables + + subReader = reader.getSubReader(value) + value = getattr(otTables, name)() + value.decompile(subReader, font) + else: + value = None + setattr(valueRecord, name, value) + return valueRecord + + def writeValueRecord(self, writer, font, valueRecord): + for name, isDevice, signed in self.format: + value = getattr(valueRecord, name, 0) + if isDevice: + if value: + subWriter = writer.getSubWriter() + writer.writeSubTable(subWriter, offsetSize=2) + value.compile(subWriter, font) + else: + writer.writeUShort(0) + elif signed: + writer.writeShort(value) + else: + writer.writeUShort(value) + + +class ValueRecord(object): + # see ValueRecordFactory + + def __init__(self, valueFormat=None, src=None): + if valueFormat is not None: + for mask, name, isDevice, signed in valueRecordFormat: + if valueFormat & mask: + setattr(self, name, None if isDevice else 0) + if src is not None: + for key, val in src.__dict__.items(): + if not hasattr(self, key): + continue + setattr(self, key, val) + elif src is not None: + self.__dict__ = src.__dict__.copy() + + def getFormat(self): + format = 0 + for name in self.__dict__.keys(): + format = format | valueRecordFormatDict[name][0] + return format + + def getEffectiveFormat(self): + format = 0 + for name, value in self.__dict__.items(): + if value: + format = format | valueRecordFormatDict[name][0] + return format + + def toXML(self, xmlWriter, font, valueName, attrs=None): + if attrs is None: + simpleItems = [] + else: + simpleItems = list(attrs) + for mask, name, isDevice, format in valueRecordFormat[:4]: # "simple" values + if hasattr(self, name): + simpleItems.append((name, getattr(self, name))) + deviceItems = [] + for mask, name, isDevice, format in valueRecordFormat[4:8]: # device 
records + if hasattr(self, name): + device = getattr(self, name) + if device is not None: + deviceItems.append((name, device)) + if deviceItems: + xmlWriter.begintag(valueName, simpleItems) + xmlWriter.newline() + for name, deviceRecord in deviceItems: + if deviceRecord is not None: + deviceRecord.toXML(xmlWriter, font, name=name) + xmlWriter.endtag(valueName) + xmlWriter.newline() + else: + xmlWriter.simpletag(valueName, simpleItems) + xmlWriter.newline() + + def fromXML(self, name, attrs, content, font): + from . import otTables + + for k, v in attrs.items(): + setattr(self, k, int(v)) + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + value = getattr(otTables, name)() + for elem2 in content: + if not isinstance(elem2, tuple): + continue + name2, attrs2, content2 = elem2 + value.fromXML(name2, attrs2, content2, font) + setattr(self, name, value) + + def __ne__(self, other): + result = self.__eq__(other) + return result if result is NotImplemented else not result + + def __eq__(self, other): + if type(self) != type(other): + return NotImplemented + return self.__dict__ == other.__dict__ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/otConverters.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/otConverters.py new file mode 100644 index 0000000000000000000000000000000000000000..656836bd3cbe887a306ae3ba3e33deff173af7b6 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/otConverters.py @@ -0,0 +1,2064 @@ +from fontTools.misc.fixedTools import ( + fixedToFloat as fi2fl, + floatToFixed as fl2fi, + floatToFixedToStr as fl2str, + strToFixedToFloat as str2fl, + ensureVersionIsLong as fi2ve, + versionToFixed as ve2fi, +) +from fontTools.ttLib.tables.TupleVariation import TupleVariation +from fontTools.misc.roundTools import nearestMultipleShortestRepr, otRound +from fontTools.misc.textTools import bytesjoin, tobytes, tostr, pad, 
safeEval +from fontTools.misc.lazyTools import LazyList +from fontTools.ttLib import getSearchRange +from .otBase import ( + CountReference, + FormatSwitchingBaseTable, + OTTableReader, + OTTableWriter, + ValueRecordFactory, +) +from .otTables import ( + lookupTypes, + VarCompositeGlyph, + AATStateTable, + AATState, + AATAction, + ContextualMorphAction, + LigatureMorphAction, + InsertionMorphAction, + MorxSubtable, + ExtendMode as _ExtendMode, + CompositeMode as _CompositeMode, + NO_VARIATION_INDEX, +) +from itertools import zip_longest, accumulate +from functools import partial +from types import SimpleNamespace +import re +import struct +from typing import Optional +import logging + + +log = logging.getLogger(__name__) +istuple = lambda t: isinstance(t, tuple) + + +def buildConverters(tableSpec, tableNamespace): + """Given a table spec from otData.py, build a converter object for each + field of the table. This is called for each table in otData.py, and + the results are assigned to the corresponding class in otTables.py.""" + converters = [] + convertersByName = {} + for tp, name, repeat, aux, descr in tableSpec: + tableName = name + if name.startswith("ValueFormat"): + assert tp == "uint16" + converterClass = ValueFormat + elif name.endswith("Count") or name in ("StructLength", "MorphType"): + converterClass = { + "uint8": ComputedUInt8, + "uint16": ComputedUShort, + "uint32": ComputedULong, + }[tp] + elif name == "SubTable": + converterClass = SubTable + elif name == "ExtSubTable": + converterClass = ExtSubTable + elif name == "SubStruct": + converterClass = SubStruct + elif name == "FeatureParams": + converterClass = FeatureParams + elif name in ("CIDGlyphMapping", "GlyphCIDMapping"): + converterClass = StructWithLength + else: + if not tp in converterMapping and "(" not in tp: + tableName = tp + converterClass = Struct + else: + converterClass = eval(tp, tableNamespace, converterMapping) + + conv = converterClass(name, repeat, aux, description=descr) + + if 
conv.tableClass: + # A "template" such as OffsetTo(AType) knows the table class already + tableClass = conv.tableClass + elif tp in ("MortChain", "MortSubtable", "MorxChain"): + tableClass = tableNamespace.get(tp) + else: + tableClass = tableNamespace.get(tableName) + + if not conv.tableClass: + conv.tableClass = tableClass + + if name in ["SubTable", "ExtSubTable", "SubStruct"]: + conv.lookupTypes = tableNamespace["lookupTypes"] + # also create reverse mapping + for t in conv.lookupTypes.values(): + for cls in t.values(): + convertersByName[cls.__name__] = Table(name, repeat, aux, cls) + if name == "FeatureParams": + conv.featureParamTypes = tableNamespace["featureParamTypes"] + conv.defaultFeatureParams = tableNamespace["FeatureParams"] + for cls in conv.featureParamTypes.values(): + convertersByName[cls.__name__] = Table(name, repeat, aux, cls) + converters.append(conv) + assert name not in convertersByName, name + convertersByName[name] = conv + return converters, convertersByName + + +class BaseConverter(object): + """Base class for converter objects. 
Apart from the constructor, this + is an abstract class.""" + + def __init__(self, name, repeat, aux, tableClass=None, *, description=""): + self.name = name + self.repeat = repeat + self.aux = aux + if self.aux and not self.repeat: + self.aux = compile(self.aux, "", "eval") + self.tableClass = tableClass + self.isCount = name.endswith("Count") or name in [ + "DesignAxisRecordSize", + "ValueRecordSize", + ] + self.isLookupType = name.endswith("LookupType") or name == "MorphType" + self.isPropagated = name in [ + "ClassCount", + "Class2Count", + "FeatureTag", + "SettingsCount", + "VarRegionCount", + "MappingCount", + "RegionAxisCount", + "DesignAxisCount", + "DesignAxisRecordSize", + "AxisValueCount", + "ValueRecordSize", + "AxisCount", + "BaseGlyphRecordCount", + "LayerRecordCount", + "AxisIndicesList", + ] + self.description = description + + def readArray(self, reader, font, tableDict, count): + """Read an array of values from the reader.""" + lazy = font.lazy and count > 8 + if lazy: + recordSize = self.getRecordSize(reader) + if recordSize is NotImplemented: + lazy = False + if not lazy: + l = [] + for i in range(count): + l.append(self.read(reader, font, tableDict)) + return l + else: + + def get_read_item(): + reader_copy = reader.copy() + pos = reader.pos + + def read_item(i): + reader_copy.seek(pos + i * recordSize) + return self.read(reader_copy, font, {}) + + return read_item + + read_item = get_read_item() + l = LazyList(read_item for i in range(count)) + reader.advance(count * recordSize) + + return l + + def getRecordSize(self, reader): + if hasattr(self, "staticSize"): + return self.staticSize + return NotImplemented + + def read(self, reader, font, tableDict): + """Read a value from the reader.""" + raise NotImplementedError(self) + + def writeArray(self, writer, font, tableDict, values): + try: + for i, value in enumerate(values): + self.write(writer, font, tableDict, value, i) + except Exception as e: + e.args = e.args + (i,) + raise + + def 
write(self, writer, font, tableDict, value, repeatIndex=None): + """Write a value to the writer.""" + raise NotImplementedError(self) + + def xmlRead(self, attrs, content, font): + """Read a value from XML.""" + raise NotImplementedError(self) + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + """Write a value to XML.""" + raise NotImplementedError(self) + + varIndexBasePlusOffsetRE = re.compile(r"VarIndexBase\s*\+\s*(\d+)") + + def getVarIndexOffset(self) -> Optional[int]: + """If description has `VarIndexBase + {offset}`, return the offset else None.""" + m = self.varIndexBasePlusOffsetRE.search(self.description) + if not m: + return None + return int(m.group(1)) + + +class SimpleValue(BaseConverter): + @staticmethod + def toString(value): + return value + + @staticmethod + def fromString(value): + return value + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.simpletag(name, attrs + [("value", self.toString(value))]) + xmlWriter.newline() + + def xmlRead(self, attrs, content, font): + return self.fromString(attrs["value"]) + + +class OptionalValue(SimpleValue): + DEFAULT = None + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + if value != self.DEFAULT: + attrs.append(("value", self.toString(value))) + xmlWriter.simpletag(name, attrs) + xmlWriter.newline() + + def xmlRead(self, attrs, content, font): + if "value" in attrs: + return self.fromString(attrs["value"]) + return self.DEFAULT + + +class IntValue(SimpleValue): + @staticmethod + def fromString(value): + return int(value, 0) + + +class Long(IntValue): + staticSize = 4 + + def read(self, reader, font, tableDict): + return reader.readLong() + + def readArray(self, reader, font, tableDict, count): + return reader.readLongArray(count) + + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeLong(value) + + def writeArray(self, writer, font, tableDict, values): + writer.writeLongArray(values) + + +class ULong(IntValue): + staticSize = 4 
+ + def read(self, reader, font, tableDict): + return reader.readULong() + + def readArray(self, reader, font, tableDict, count): + return reader.readULongArray(count) + + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeULong(value) + + def writeArray(self, writer, font, tableDict, values): + writer.writeULongArray(values) + + +class Flags32(ULong): + @staticmethod + def toString(value): + return "0x%08X" % value + + +class VarIndex(OptionalValue, ULong): + DEFAULT = NO_VARIATION_INDEX + + +class Short(IntValue): + staticSize = 2 + + def read(self, reader, font, tableDict): + return reader.readShort() + + def readArray(self, reader, font, tableDict, count): + return reader.readShortArray(count) + + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeShort(value) + + def writeArray(self, writer, font, tableDict, values): + writer.writeShortArray(values) + + +class UShort(IntValue): + staticSize = 2 + + def read(self, reader, font, tableDict): + return reader.readUShort() + + def readArray(self, reader, font, tableDict, count): + return reader.readUShortArray(count) + + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeUShort(value) + + def writeArray(self, writer, font, tableDict, values): + writer.writeUShortArray(values) + + +class Int8(IntValue): + staticSize = 1 + + def read(self, reader, font, tableDict): + return reader.readInt8() + + def readArray(self, reader, font, tableDict, count): + return reader.readInt8Array(count) + + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeInt8(value) + + def writeArray(self, writer, font, tableDict, values): + writer.writeInt8Array(values) + + +class UInt8(IntValue): + staticSize = 1 + + def read(self, reader, font, tableDict): + return reader.readUInt8() + + def readArray(self, reader, font, tableDict, count): + return reader.readUInt8Array(count) + + def write(self, writer, font, tableDict, value, 
repeatIndex=None): + writer.writeUInt8(value) + + def writeArray(self, writer, font, tableDict, values): + writer.writeUInt8Array(values) + + +class UInt24(IntValue): + staticSize = 3 + + def read(self, reader, font, tableDict): + return reader.readUInt24() + + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeUInt24(value) + + +class ComputedInt(IntValue): + def xmlWrite(self, xmlWriter, font, value, name, attrs): + if value is not None: + xmlWriter.comment("%s=%s" % (name, value)) + xmlWriter.newline() + + +class ComputedUInt8(ComputedInt, UInt8): + pass + + +class ComputedUShort(ComputedInt, UShort): + pass + + +class ComputedULong(ComputedInt, ULong): + pass + + +class Tag(SimpleValue): + staticSize = 4 + + def read(self, reader, font, tableDict): + return reader.readTag() + + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeTag(value) + + +class GlyphID(SimpleValue): + staticSize = 2 + typecode = "H" + + def readArray(self, reader, font, tableDict, count): + return font.getGlyphNameMany( + reader.readArray(self.typecode, self.staticSize, count) + ) + + def read(self, reader, font, tableDict): + return font.getGlyphName(reader.readValue(self.typecode, self.staticSize)) + + def writeArray(self, writer, font, tableDict, values): + writer.writeArray(self.typecode, font.getGlyphIDMany(values)) + + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer.writeValue(self.typecode, font.getGlyphID(value)) + + +class GlyphID32(GlyphID): + staticSize = 4 + typecode = "L" + + +class NameID(UShort): + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.simpletag(name, attrs + [("value", value)]) + if font and value: + nameTable = font.get("name") + if nameTable: + name = nameTable.getDebugName(value) + xmlWriter.write(" ") + if name: + xmlWriter.comment(name) + else: + xmlWriter.comment("missing from name table") + log.warning("name id %d missing from name table" % value) + 
class Angle(F2Dot14):
    """Angle in degrees, stored as an F2Dot14 fraction of a half circle.

    Encoding examples: 1.0 => 180, -0.5 => -90, -2.0 => -360, etc.
    """

    # Added to the decoded F2Dot14 fraction before scaling to degrees;
    # BiasedAngle overrides this to shift the representable range.
    bias = 0.0
    # Smallest representable angle step: 1/2**14 of a half circle, in degrees.
    factor = 1.0 / (1 << 14) * 180  # 0.010986328125

    @classmethod
    def fromInt(cls, value):
        # Decode raw F2Dot14, apply bias, then scale half circles to degrees.
        return (super().fromInt(value) + cls.bias) * 180

    @classmethod
    def toInt(cls, value):
        # Exact inverse of fromInt: degrees -> biased fraction -> raw int.
        return super().toInt((value / 180) - cls.bias)

    @classmethod
    def fromString(cls, value):
        # quantize to nearest multiples of minimum fixed-precision angle
        return otRound(float(value) / cls.factor) * cls.factor

    @classmethod
    def toString(cls, value):
        # Shortest decimal that still rounds back to the same quantized value.
        return nearestMultipleShortestRepr(value, cls.factor)
+ """ + + staticSize = 64 + + def read(self, reader, font, tableDict): + data = reader.readData(self.staticSize) + zeroPos = data.find(b"\0") + if zeroPos >= 0: + data = data[:zeroPos] + s = tostr(data, encoding="ascii", errors="replace") + if s != tostr(data, encoding="ascii", errors="ignore"): + log.warning('replaced non-ASCII characters in "%s"' % s) + return s + + def write(self, writer, font, tableDict, value, repeatIndex=None): + data = tobytes(value, encoding="ascii", errors="replace") + if data != tobytes(value, encoding="ascii", errors="ignore"): + log.warning('replacing non-ASCII characters in "%s"' % value) + if len(data) > self.staticSize: + log.warning( + 'truncating overlong "%s" to %d bytes' % (value, self.staticSize) + ) + data = (data + b"\0" * self.staticSize)[: self.staticSize] + writer.writeData(data) + + +class Struct(BaseConverter): + def getRecordSize(self, reader): + return self.tableClass and self.tableClass.getRecordSize(reader) + + def read(self, reader, font, tableDict): + table = self.tableClass() + table.decompile(reader, font) + return table + + def write(self, writer, font, tableDict, value, repeatIndex=None): + value.compile(writer, font) + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + if value is None: + if attrs: + # If there are attributes (probably index), then + # don't drop this even if it's NULL. It will mess + # up the array indices of the containing element. + xmlWriter.simpletag(name, attrs + [("empty", 1)]) + xmlWriter.newline() + else: + pass # NULL table, ignore + else: + value.toXML(xmlWriter, font, attrs, name=name) + + def xmlRead(self, attrs, content, font): + if "empty" in attrs and safeEval(attrs["empty"]): + return None + table = self.tableClass() + Format = attrs.get("Format") + if Format is not None: + table.Format = int(Format) + + noPostRead = not hasattr(table, "postRead") + if noPostRead: + # TODO Cache table.hasPropagated. 
class Table(Struct):
    """A Struct reached through a 16-bit offset from the parent table.

    Subclasses (LTable, Table24) only change the width of the offset field.
    """

    staticSize = 2  # size of the offset field itself (uint16)

    def readOffset(self, reader):
        # Offset-width hook; overridden by LTable (32-bit) and Table24 (24-bit).
        return reader.readUShort()

    def writeNullOffset(self, writer):
        # A zero offset encodes a NULL (absent) subtable.
        writer.writeUShort(0)

    def read(self, reader, font, tableDict):
        """Read the offset and return the subtable it points to.

        Returns None for a NULL (zero) offset.  When font.lazy is set the
        subtable is not decompiled now: it just keeps its sub-reader and the
        font so decompilation can happen on demand later.
        """
        offset = self.readOffset(reader)
        if offset == 0:
            return None
        table = self.tableClass()
        reader = reader.getSubReader(offset)
        if font.lazy:
            table.reader = reader
            table.font = font
        else:
            table.decompile(reader, font)
        return table

    def write(self, writer, font, tableDict, value, repeatIndex=None):
        """Write an offset to *value*, compiling it into a sub-writer.

        None is written as a NULL (zero) offset.  The converter name (and
        repeatIndex for array members) is recorded on the sub-writer —
        presumably for diagnostics in the parent writer; confirm in
        OTTableWriter.
        """
        if value is None:
            self.writeNullOffset(writer)
        else:
            subWriter = writer.getSubWriter()
            subWriter.name = self.name
            if repeatIndex is not None:
                subWriter.repeatIndex = repeatIndex
            writer.writeSubTable(subWriter, offsetSize=self.staticSize)
            value.compile(subWriter, font)
+ Table.write(self, writer, font, tableDict, value, repeatIndex) + + +class FeatureParams(Table): + def getConverter(self, featureTag): + tableClass = self.featureParamTypes.get(featureTag, self.defaultFeatureParams) + return self.__class__(self.name, self.repeat, self.aux, tableClass) + + +class ValueFormat(IntValue): + staticSize = 2 + + def __init__(self, name, repeat, aux, tableClass=None, *, description=""): + BaseConverter.__init__( + self, name, repeat, aux, tableClass, description=description + ) + self.which = "ValueFormat" + ("2" if name[-1] == "2" else "1") + + def read(self, reader, font, tableDict): + format = reader.readUShort() + reader[self.which] = ValueRecordFactory(format) + return format + + def write(self, writer, font, tableDict, format, repeatIndex=None): + writer.writeUShort(format) + writer[self.which] = ValueRecordFactory(format) + + +class ValueRecord(ValueFormat): + def getRecordSize(self, reader): + return 2 * len(reader[self.which]) + + def read(self, reader, font, tableDict): + return reader[self.which].readValueRecord(reader, font) + + def write(self, writer, font, tableDict, value, repeatIndex=None): + writer[self.which].writeValueRecord(writer, font, value) + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + if value is None: + pass # NULL table, ignore + else: + value.toXML(xmlWriter, font, self.name, attrs) + + def xmlRead(self, attrs, content, font): + from .otBase import ValueRecord + + value = ValueRecord() + value.fromXML(None, attrs, content, font) + return value + + +class AATLookup(BaseConverter): + BIN_SEARCH_HEADER_SIZE = 10 + + def __init__(self, name, repeat, aux, tableClass, *, description=""): + BaseConverter.__init__( + self, name, repeat, aux, tableClass, description=description + ) + if issubclass(self.tableClass, SimpleValue): + self.converter = self.tableClass(name="Value", repeat=None, aux=None) + else: + self.converter = Table( + name="Value", repeat=None, aux=None, tableClass=self.tableClass + ) + 
+ def read(self, reader, font, tableDict): + format = reader.readUShort() + if format == 0: + return self.readFormat0(reader, font) + elif format == 2: + return self.readFormat2(reader, font) + elif format == 4: + return self.readFormat4(reader, font) + elif format == 6: + return self.readFormat6(reader, font) + elif format == 8: + return self.readFormat8(reader, font) + else: + assert False, "unsupported lookup format: %d" % format + + def write(self, writer, font, tableDict, value, repeatIndex=None): + values = list( + sorted([(font.getGlyphID(glyph), val) for glyph, val in value.items()]) + ) + # TODO: Also implement format 4. + formats = list( + sorted( + filter( + None, + [ + self.buildFormat0(writer, font, values), + self.buildFormat2(writer, font, values), + self.buildFormat6(writer, font, values), + self.buildFormat8(writer, font, values), + ], + ) + ) + ) + # We use the format ID as secondary sort key to make the output + # deterministic when multiple formats have same encoded size. 
+ dataSize, lookupFormat, writeMethod = formats[0] + pos = writer.getDataLength() + writeMethod() + actualSize = writer.getDataLength() - pos + assert ( + actualSize == dataSize + ), "AATLookup format %d claimed to write %d bytes, but wrote %d" % ( + lookupFormat, + dataSize, + actualSize, + ) + + @staticmethod + def writeBinSearchHeader(writer, numUnits, unitSize): + writer.writeUShort(unitSize) + writer.writeUShort(numUnits) + searchRange, entrySelector, rangeShift = getSearchRange( + n=numUnits, itemSize=unitSize + ) + writer.writeUShort(searchRange) + writer.writeUShort(entrySelector) + writer.writeUShort(rangeShift) + + def buildFormat0(self, writer, font, values): + numGlyphs = len(font.getGlyphOrder()) + if len(values) != numGlyphs: + return None + valueSize = self.converter.staticSize + return ( + 2 + numGlyphs * valueSize, + 0, + lambda: self.writeFormat0(writer, font, values), + ) + + def writeFormat0(self, writer, font, values): + writer.writeUShort(0) + for glyphID_, value in values: + self.converter.write( + writer, font, tableDict=None, value=value, repeatIndex=None + ) + + def buildFormat2(self, writer, font, values): + segStart, segValue = values[0] + segEnd = segStart + segments = [] + for glyphID, curValue in values[1:]: + if glyphID != segEnd + 1 or curValue != segValue: + segments.append((segStart, segEnd, segValue)) + segStart = segEnd = glyphID + segValue = curValue + else: + segEnd = glyphID + segments.append((segStart, segEnd, segValue)) + valueSize = self.converter.staticSize + numUnits, unitSize = len(segments) + 1, valueSize + 4 + return ( + 2 + self.BIN_SEARCH_HEADER_SIZE + numUnits * unitSize, + 2, + lambda: self.writeFormat2(writer, font, segments), + ) + + def writeFormat2(self, writer, font, segments): + writer.writeUShort(2) + valueSize = self.converter.staticSize + numUnits, unitSize = len(segments), valueSize + 4 + self.writeBinSearchHeader(writer, numUnits, unitSize) + for firstGlyph, lastGlyph, value in segments: + 
writer.writeUShort(lastGlyph) + writer.writeUShort(firstGlyph) + self.converter.write( + writer, font, tableDict=None, value=value, repeatIndex=None + ) + writer.writeUShort(0xFFFF) + writer.writeUShort(0xFFFF) + writer.writeData(b"\x00" * valueSize) + + def buildFormat6(self, writer, font, values): + valueSize = self.converter.staticSize + numUnits, unitSize = len(values), valueSize + 2 + return ( + 2 + self.BIN_SEARCH_HEADER_SIZE + (numUnits + 1) * unitSize, + 6, + lambda: self.writeFormat6(writer, font, values), + ) + + def writeFormat6(self, writer, font, values): + writer.writeUShort(6) + valueSize = self.converter.staticSize + numUnits, unitSize = len(values), valueSize + 2 + self.writeBinSearchHeader(writer, numUnits, unitSize) + for glyphID, value in values: + writer.writeUShort(glyphID) + self.converter.write( + writer, font, tableDict=None, value=value, repeatIndex=None + ) + writer.writeUShort(0xFFFF) + writer.writeData(b"\x00" * valueSize) + + def buildFormat8(self, writer, font, values): + minGlyphID, maxGlyphID = values[0][0], values[-1][0] + if len(values) != maxGlyphID - minGlyphID + 1: + return None + valueSize = self.converter.staticSize + return ( + 6 + len(values) * valueSize, + 8, + lambda: self.writeFormat8(writer, font, values), + ) + + def writeFormat8(self, writer, font, values): + firstGlyphID = values[0][0] + writer.writeUShort(8) + writer.writeUShort(firstGlyphID) + writer.writeUShort(len(values)) + for _, value in values: + self.converter.write( + writer, font, tableDict=None, value=value, repeatIndex=None + ) + + def readFormat0(self, reader, font): + numGlyphs = len(font.getGlyphOrder()) + data = self.converter.readArray(reader, font, tableDict=None, count=numGlyphs) + return {font.getGlyphName(k): value for k, value in enumerate(data)} + + def readFormat2(self, reader, font): + mapping = {} + pos = reader.pos - 2 # start of table is at UShort for format + unitSize, numUnits = reader.readUShort(), reader.readUShort() + assert unitSize 
>= 4 + self.converter.staticSize, unitSize + for i in range(numUnits): + reader.seek(pos + i * unitSize + 12) + last = reader.readUShort() + first = reader.readUShort() + value = self.converter.read(reader, font, tableDict=None) + if last != 0xFFFF: + for k in range(first, last + 1): + mapping[font.getGlyphName(k)] = value + return mapping + + def readFormat4(self, reader, font): + mapping = {} + pos = reader.pos - 2 # start of table is at UShort for format + unitSize = reader.readUShort() + assert unitSize >= 6, unitSize + for i in range(reader.readUShort()): + reader.seek(pos + i * unitSize + 12) + last = reader.readUShort() + first = reader.readUShort() + offset = reader.readUShort() + if last != 0xFFFF: + dataReader = reader.getSubReader(0) # relative to current position + dataReader.seek(pos + offset) # relative to start of table + data = self.converter.readArray( + dataReader, font, tableDict=None, count=last - first + 1 + ) + for k, v in enumerate(data): + mapping[font.getGlyphName(first + k)] = v + return mapping + + def readFormat6(self, reader, font): + mapping = {} + pos = reader.pos - 2 # start of table is at UShort for format + unitSize = reader.readUShort() + assert unitSize >= 2 + self.converter.staticSize, unitSize + for i in range(reader.readUShort()): + reader.seek(pos + i * unitSize + 12) + glyphID = reader.readUShort() + value = self.converter.read(reader, font, tableDict=None) + if glyphID != 0xFFFF: + mapping[font.getGlyphName(glyphID)] = value + return mapping + + def readFormat8(self, reader, font): + first = reader.readUShort() + count = reader.readUShort() + data = self.converter.readArray(reader, font, tableDict=None, count=count) + return {font.getGlyphName(first + k): value for (k, value) in enumerate(data)} + + def xmlRead(self, attrs, content, font): + value = {} + for element in content: + if isinstance(element, tuple): + name, a, eltContent = element + if name == "Lookup": + value[a["glyph"]] = self.converter.xmlRead(a, eltContent, 
font) + return value + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.begintag(name, attrs) + xmlWriter.newline() + for glyph, value in sorted(value.items()): + self.converter.xmlWrite( + xmlWriter, font, value=value, name="Lookup", attrs=[("glyph", glyph)] + ) + xmlWriter.endtag(name) + xmlWriter.newline() + + +# The AAT 'ankr' table has an unusual structure: An offset to an AATLookup +# followed by an offset to a glyph data table. Other than usual, the +# offsets in the AATLookup are not relative to the beginning of +# the beginning of the 'ankr' table, but relative to the glyph data table. +# So, to find the anchor data for a glyph, one needs to add the offset +# to the data table to the offset found in the AATLookup, and then use +# the sum of these two offsets to find the actual data. +class AATLookupWithDataOffset(BaseConverter): + def read(self, reader, font, tableDict): + lookupOffset = reader.readULong() + dataOffset = reader.readULong() + lookupReader = reader.getSubReader(lookupOffset) + lookup = AATLookup("DataOffsets", None, None, UShort) + offsets = lookup.read(lookupReader, font, tableDict) + result = {} + for glyph, offset in offsets.items(): + dataReader = reader.getSubReader(offset + dataOffset) + item = self.tableClass() + item.decompile(dataReader, font) + result[glyph] = item + return result + + def write(self, writer, font, tableDict, value, repeatIndex=None): + # We do not work with OTTableWriter sub-writers because + # the offsets in our AATLookup are relative to our data + # table, for which we need to provide an offset value itself. + # It might have been possible to somehow make a kludge for + # performing this indirect offset computation directly inside + # OTTableWriter. But this would have made the internal logic + # of OTTableWriter even more complex than it already is, + # so we decided to roll our own offset computation for the + # contents of the AATLookup and associated data table. 
+ offsetByGlyph, offsetByData, dataLen = {}, {}, 0 + compiledData = [] + for glyph in sorted(value, key=font.getGlyphID): + subWriter = OTTableWriter() + value[glyph].compile(subWriter, font) + data = subWriter.getAllData() + offset = offsetByData.get(data, None) + if offset == None: + offset = dataLen + dataLen = dataLen + len(data) + offsetByData[data] = offset + compiledData.append(data) + offsetByGlyph[glyph] = offset + # For calculating the offsets to our AATLookup and data table, + # we can use the regular OTTableWriter infrastructure. + lookupWriter = writer.getSubWriter() + lookup = AATLookup("DataOffsets", None, None, UShort) + lookup.write(lookupWriter, font, tableDict, offsetByGlyph, None) + + dataWriter = writer.getSubWriter() + writer.writeSubTable(lookupWriter, offsetSize=4) + writer.writeSubTable(dataWriter, offsetSize=4) + for d in compiledData: + dataWriter.writeData(d) + + def xmlRead(self, attrs, content, font): + lookup = AATLookup("DataOffsets", None, None, self.tableClass) + return lookup.xmlRead(attrs, content, font) + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + lookup = AATLookup("DataOffsets", None, None, self.tableClass) + lookup.xmlWrite(xmlWriter, font, value, name, attrs) + + +class MorxSubtableConverter(BaseConverter): + _PROCESSING_ORDERS = { + # bits 30 and 28 of morx.CoverageFlags; see morx spec + (False, False): "LayoutOrder", + (True, False): "ReversedLayoutOrder", + (False, True): "LogicalOrder", + (True, True): "ReversedLogicalOrder", + } + + _PROCESSING_ORDERS_REVERSED = {val: key for key, val in _PROCESSING_ORDERS.items()} + + def __init__(self, name, repeat, aux, tableClass=None, *, description=""): + BaseConverter.__init__( + self, name, repeat, aux, tableClass, description=description + ) + + def _setTextDirectionFromCoverageFlags(self, flags, subtable): + if (flags & 0x20) != 0: + subtable.TextDirection = "Any" + elif (flags & 0x80) != 0: + subtable.TextDirection = "Vertical" + else: + 
subtable.TextDirection = "Horizontal" + + def read(self, reader, font, tableDict): + pos = reader.pos + m = MorxSubtable() + m.StructLength = reader.readULong() + flags = reader.readUInt8() + orderKey = ((flags & 0x40) != 0, (flags & 0x10) != 0) + m.ProcessingOrder = self._PROCESSING_ORDERS[orderKey] + self._setTextDirectionFromCoverageFlags(flags, m) + m.Reserved = reader.readUShort() + m.Reserved |= (flags & 0xF) << 16 + m.MorphType = reader.readUInt8() + m.SubFeatureFlags = reader.readULong() + tableClass = lookupTypes["morx"].get(m.MorphType) + if tableClass is None: + assert False, "unsupported 'morx' lookup type %s" % m.MorphType + # To decode AAT ligatures, we need to know the subtable size. + # The easiest way to pass this along is to create a new reader + # that works on just the subtable as its data. + headerLength = reader.pos - pos + data = reader.data[reader.pos : reader.pos + m.StructLength - headerLength] + assert len(data) == m.StructLength - headerLength + subReader = OTTableReader(data=data, tableTag=reader.tableTag) + m.SubStruct = tableClass() + m.SubStruct.decompile(subReader, font) + reader.seek(pos + m.StructLength) + return m + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.begintag(name, attrs) + xmlWriter.newline() + xmlWriter.comment("StructLength=%d" % value.StructLength) + xmlWriter.newline() + xmlWriter.simpletag("TextDirection", value=value.TextDirection) + xmlWriter.newline() + xmlWriter.simpletag("ProcessingOrder", value=value.ProcessingOrder) + xmlWriter.newline() + if value.Reserved != 0: + xmlWriter.simpletag("Reserved", value="0x%04x" % value.Reserved) + xmlWriter.newline() + xmlWriter.comment("MorphType=%d" % value.MorphType) + xmlWriter.newline() + xmlWriter.simpletag("SubFeatureFlags", value="0x%08x" % value.SubFeatureFlags) + xmlWriter.newline() + value.SubStruct.toXML(xmlWriter, font) + xmlWriter.endtag(name) + xmlWriter.newline() + + def xmlRead(self, attrs, content, font): + m = MorxSubtable() + 
covFlags = 0 + m.Reserved = 0 + for eltName, eltAttrs, eltContent in filter(istuple, content): + if eltName == "CoverageFlags": + # Only in XML from old versions of fonttools. + covFlags = safeEval(eltAttrs["value"]) + orderKey = ((covFlags & 0x40) != 0, (covFlags & 0x10) != 0) + m.ProcessingOrder = self._PROCESSING_ORDERS[orderKey] + self._setTextDirectionFromCoverageFlags(covFlags, m) + elif eltName == "ProcessingOrder": + m.ProcessingOrder = eltAttrs["value"] + assert m.ProcessingOrder in self._PROCESSING_ORDERS_REVERSED, ( + "unknown ProcessingOrder: %s" % m.ProcessingOrder + ) + elif eltName == "TextDirection": + m.TextDirection = eltAttrs["value"] + assert m.TextDirection in {"Horizontal", "Vertical", "Any"}, ( + "unknown TextDirection %s" % m.TextDirection + ) + elif eltName == "Reserved": + m.Reserved = safeEval(eltAttrs["value"]) + elif eltName == "SubFeatureFlags": + m.SubFeatureFlags = safeEval(eltAttrs["value"]) + elif eltName.endswith("Morph"): + m.fromXML(eltName, eltAttrs, eltContent, font) + else: + assert False, eltName + m.Reserved = (covFlags & 0xF) << 16 | m.Reserved + return m + + def write(self, writer, font, tableDict, value, repeatIndex=None): + covFlags = (value.Reserved & 0x000F0000) >> 16 + reverseOrder, logicalOrder = self._PROCESSING_ORDERS_REVERSED[ + value.ProcessingOrder + ] + covFlags |= 0x80 if value.TextDirection == "Vertical" else 0 + covFlags |= 0x40 if reverseOrder else 0 + covFlags |= 0x20 if value.TextDirection == "Any" else 0 + covFlags |= 0x10 if logicalOrder else 0 + value.CoverageFlags = covFlags + lengthIndex = len(writer.items) + before = writer.getDataLength() + value.StructLength = 0xDEADBEEF + # The high nibble of value.Reserved is actuallly encoded + # into coverageFlags, so we need to clear it here. 
+ origReserved = value.Reserved # including high nibble + value.Reserved = value.Reserved & 0xFFFF # without high nibble + value.compile(writer, font) + value.Reserved = origReserved # restore original value + assert writer.items[lengthIndex] == b"\xde\xad\xbe\xef" + length = writer.getDataLength() - before + writer.items[lengthIndex] = struct.pack(">L", length) + + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6Tables.html#ExtendedStateHeader +# TODO: Untangle the implementation of the various lookup-specific formats. +class STXHeader(BaseConverter): + def __init__(self, name, repeat, aux, tableClass, *, description=""): + BaseConverter.__init__( + self, name, repeat, aux, tableClass, description=description + ) + assert issubclass(self.tableClass, AATAction) + self.classLookup = AATLookup("GlyphClasses", None, None, UShort) + if issubclass(self.tableClass, ContextualMorphAction): + self.perGlyphLookup = AATLookup("PerGlyphLookup", None, None, GlyphID) + else: + self.perGlyphLookup = None + + def read(self, reader, font, tableDict): + table = AATStateTable() + pos = reader.pos + classTableReader = reader.getSubReader(0) + stateArrayReader = reader.getSubReader(0) + entryTableReader = reader.getSubReader(0) + actionReader = None + ligaturesReader = None + table.GlyphClassCount = reader.readULong() + classTableReader.seek(pos + reader.readULong()) + stateArrayReader.seek(pos + reader.readULong()) + entryTableReader.seek(pos + reader.readULong()) + if self.perGlyphLookup is not None: + perGlyphTableReader = reader.getSubReader(0) + perGlyphTableReader.seek(pos + reader.readULong()) + if issubclass(self.tableClass, LigatureMorphAction): + actionReader = reader.getSubReader(0) + actionReader.seek(pos + reader.readULong()) + ligComponentReader = reader.getSubReader(0) + ligComponentReader.seek(pos + reader.readULong()) + ligaturesReader = reader.getSubReader(0) + ligaturesReader.seek(pos + reader.readULong()) + numLigComponents = 
(ligaturesReader.pos - ligComponentReader.pos) // 2 + assert numLigComponents >= 0 + table.LigComponents = ligComponentReader.readUShortArray(numLigComponents) + table.Ligatures = self._readLigatures(ligaturesReader, font) + elif issubclass(self.tableClass, InsertionMorphAction): + actionReader = reader.getSubReader(0) + actionReader.seek(pos + reader.readULong()) + table.GlyphClasses = self.classLookup.read(classTableReader, font, tableDict) + numStates = int( + (entryTableReader.pos - stateArrayReader.pos) / (table.GlyphClassCount * 2) + ) + for stateIndex in range(numStates): + state = AATState() + table.States.append(state) + for glyphClass in range(table.GlyphClassCount): + entryIndex = stateArrayReader.readUShort() + state.Transitions[glyphClass] = self._readTransition( + entryTableReader, entryIndex, font, actionReader + ) + if self.perGlyphLookup is not None: + table.PerGlyphLookups = self._readPerGlyphLookups( + table, perGlyphTableReader, font + ) + return table + + def _readTransition(self, reader, entryIndex, font, actionReader): + transition = self.tableClass() + entryReader = reader.getSubReader( + reader.pos + entryIndex * transition.staticSize + ) + transition.decompile(entryReader, font, actionReader) + return transition + + def _readLigatures(self, reader, font): + limit = len(reader.data) + numLigatureGlyphs = (limit - reader.pos) // 2 + return font.getGlyphNameMany(reader.readUShortArray(numLigatureGlyphs)) + + def _countPerGlyphLookups(self, table): + # Somewhat annoyingly, the morx table does not encode + # the size of the per-glyph table. So we need to find + # the maximum value that MorphActions use as index + # into this table. 
+ numLookups = 0 + for state in table.States: + for t in state.Transitions.values(): + if isinstance(t, ContextualMorphAction): + if t.MarkIndex != 0xFFFF: + numLookups = max(numLookups, t.MarkIndex + 1) + if t.CurrentIndex != 0xFFFF: + numLookups = max(numLookups, t.CurrentIndex + 1) + return numLookups + + def _readPerGlyphLookups(self, table, reader, font): + pos = reader.pos + lookups = [] + for _ in range(self._countPerGlyphLookups(table)): + lookupReader = reader.getSubReader(0) + lookupReader.seek(pos + reader.readULong()) + lookups.append(self.perGlyphLookup.read(lookupReader, font, {})) + return lookups + + def write(self, writer, font, tableDict, value, repeatIndex=None): + glyphClassWriter = OTTableWriter() + self.classLookup.write( + glyphClassWriter, font, tableDict, value.GlyphClasses, repeatIndex=None + ) + glyphClassData = pad(glyphClassWriter.getAllData(), 2) + glyphClassCount = max(value.GlyphClasses.values()) + 1 + glyphClassTableOffset = 16 # size of STXHeader + if self.perGlyphLookup is not None: + glyphClassTableOffset += 4 + + glyphClassTableOffset += self.tableClass.actionHeaderSize + actionData, actionIndex = self.tableClass.compileActions(font, value.States) + stateArrayData, entryTableData = self._compileStates( + font, value.States, glyphClassCount, actionIndex + ) + stateArrayOffset = glyphClassTableOffset + len(glyphClassData) + entryTableOffset = stateArrayOffset + len(stateArrayData) + perGlyphOffset = entryTableOffset + len(entryTableData) + perGlyphData = pad(self._compilePerGlyphLookups(value, font), 4) + if actionData is not None: + actionOffset = entryTableOffset + len(entryTableData) + else: + actionOffset = None + + ligaturesOffset, ligComponentsOffset = None, None + ligComponentsData = self._compileLigComponents(value, font) + ligaturesData = self._compileLigatures(value, font) + if ligComponentsData is not None: + assert len(perGlyphData) == 0 + ligComponentsOffset = actionOffset + len(actionData) + ligaturesOffset = 
ligComponentsOffset + len(ligComponentsData) + + writer.writeULong(glyphClassCount) + writer.writeULong(glyphClassTableOffset) + writer.writeULong(stateArrayOffset) + writer.writeULong(entryTableOffset) + if self.perGlyphLookup is not None: + writer.writeULong(perGlyphOffset) + if actionOffset is not None: + writer.writeULong(actionOffset) + if ligComponentsOffset is not None: + writer.writeULong(ligComponentsOffset) + writer.writeULong(ligaturesOffset) + writer.writeData(glyphClassData) + writer.writeData(stateArrayData) + writer.writeData(entryTableData) + writer.writeData(perGlyphData) + if actionData is not None: + writer.writeData(actionData) + if ligComponentsData is not None: + writer.writeData(ligComponentsData) + if ligaturesData is not None: + writer.writeData(ligaturesData) + + def _compileStates(self, font, states, glyphClassCount, actionIndex): + stateArrayWriter = OTTableWriter() + entries, entryIDs = [], {} + for state in states: + for glyphClass in range(glyphClassCount): + transition = state.Transitions[glyphClass] + entryWriter = OTTableWriter() + transition.compile(entryWriter, font, actionIndex) + entryData = entryWriter.getAllData() + assert ( + len(entryData) == transition.staticSize + ), "%s has staticSize %d, " "but actually wrote %d bytes" % ( + repr(transition), + transition.staticSize, + len(entryData), + ) + entryIndex = entryIDs.get(entryData) + if entryIndex is None: + entryIndex = len(entries) + entryIDs[entryData] = entryIndex + entries.append(entryData) + stateArrayWriter.writeUShort(entryIndex) + stateArrayData = pad(stateArrayWriter.getAllData(), 4) + entryTableData = pad(bytesjoin(entries), 4) + return stateArrayData, entryTableData + + def _compilePerGlyphLookups(self, table, font): + if self.perGlyphLookup is None: + return b"" + numLookups = self._countPerGlyphLookups(table) + assert len(table.PerGlyphLookups) == numLookups, ( + "len(AATStateTable.PerGlyphLookups) is %d, " + "but the actions inside the table refer to %d" + % 
(len(table.PerGlyphLookups), numLookups) + ) + writer = OTTableWriter() + for lookup in table.PerGlyphLookups: + lookupWriter = writer.getSubWriter() + self.perGlyphLookup.write(lookupWriter, font, {}, lookup, None) + writer.writeSubTable(lookupWriter, offsetSize=4) + return writer.getAllData() + + def _compileLigComponents(self, table, font): + if not hasattr(table, "LigComponents"): + return None + writer = OTTableWriter() + for component in table.LigComponents: + writer.writeUShort(component) + return writer.getAllData() + + def _compileLigatures(self, table, font): + if not hasattr(table, "Ligatures"): + return None + writer = OTTableWriter() + for glyphName in table.Ligatures: + writer.writeUShort(font.getGlyphID(glyphName)) + return writer.getAllData() + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.begintag(name, attrs) + xmlWriter.newline() + xmlWriter.comment("GlyphClassCount=%s" % value.GlyphClassCount) + xmlWriter.newline() + for g, klass in sorted(value.GlyphClasses.items()): + xmlWriter.simpletag("GlyphClass", glyph=g, value=klass) + xmlWriter.newline() + for stateIndex, state in enumerate(value.States): + xmlWriter.begintag("State", index=stateIndex) + xmlWriter.newline() + for glyphClass, trans in sorted(state.Transitions.items()): + trans.toXML( + xmlWriter, + font=font, + attrs={"onGlyphClass": glyphClass}, + name="Transition", + ) + xmlWriter.endtag("State") + xmlWriter.newline() + for i, lookup in enumerate(value.PerGlyphLookups): + xmlWriter.begintag("PerGlyphLookup", index=i) + xmlWriter.newline() + for glyph, val in sorted(lookup.items()): + xmlWriter.simpletag("Lookup", glyph=glyph, value=val) + xmlWriter.newline() + xmlWriter.endtag("PerGlyphLookup") + xmlWriter.newline() + if hasattr(value, "LigComponents"): + xmlWriter.begintag("LigComponents") + xmlWriter.newline() + for i, val in enumerate(getattr(value, "LigComponents")): + xmlWriter.simpletag("LigComponent", index=i, value=val) + xmlWriter.newline() + 
xmlWriter.endtag("LigComponents") + xmlWriter.newline() + self._xmlWriteLigatures(xmlWriter, font, value, name, attrs) + xmlWriter.endtag(name) + xmlWriter.newline() + + def _xmlWriteLigatures(self, xmlWriter, font, value, name, attrs): + if not hasattr(value, "Ligatures"): + return + xmlWriter.begintag("Ligatures") + xmlWriter.newline() + for i, g in enumerate(getattr(value, "Ligatures")): + xmlWriter.simpletag("Ligature", index=i, glyph=g) + xmlWriter.newline() + xmlWriter.endtag("Ligatures") + xmlWriter.newline() + + def xmlRead(self, attrs, content, font): + table = AATStateTable() + for eltName, eltAttrs, eltContent in filter(istuple, content): + if eltName == "GlyphClass": + glyph = eltAttrs["glyph"] + value = eltAttrs["value"] + table.GlyphClasses[glyph] = safeEval(value) + elif eltName == "State": + state = self._xmlReadState(eltAttrs, eltContent, font) + table.States.append(state) + elif eltName == "PerGlyphLookup": + lookup = self.perGlyphLookup.xmlRead(eltAttrs, eltContent, font) + table.PerGlyphLookups.append(lookup) + elif eltName == "LigComponents": + table.LigComponents = self._xmlReadLigComponents( + eltAttrs, eltContent, font + ) + elif eltName == "Ligatures": + table.Ligatures = self._xmlReadLigatures(eltAttrs, eltContent, font) + table.GlyphClassCount = max(table.GlyphClasses.values()) + 1 + return table + + def _xmlReadState(self, attrs, content, font): + state = AATState() + for eltName, eltAttrs, eltContent in filter(istuple, content): + if eltName == "Transition": + glyphClass = safeEval(eltAttrs["onGlyphClass"]) + transition = self.tableClass() + transition.fromXML(eltName, eltAttrs, eltContent, font) + state.Transitions[glyphClass] = transition + return state + + def _xmlReadLigComponents(self, attrs, content, font): + ligComponents = [] + for eltName, eltAttrs, _eltContent in filter(istuple, content): + if eltName == "LigComponent": + ligComponents.append(safeEval(eltAttrs["value"])) + return ligComponents + + def _xmlReadLigatures(self, 
attrs, content, font): + ligs = [] + for eltName, eltAttrs, _eltContent in filter(istuple, content): + if eltName == "Ligature": + ligs.append(eltAttrs["glyph"]) + return ligs + + +class CIDGlyphMap(BaseConverter): + def read(self, reader, font, tableDict): + numCIDs = reader.readUShort() + result = {} + for cid, glyphID in enumerate(reader.readUShortArray(numCIDs)): + if glyphID != 0xFFFF: + result[cid] = font.getGlyphName(glyphID) + return result + + def write(self, writer, font, tableDict, value, repeatIndex=None): + items = {cid: font.getGlyphID(glyph) for cid, glyph in value.items()} + count = max(items) + 1 if items else 0 + writer.writeUShort(count) + for cid in range(count): + writer.writeUShort(items.get(cid, 0xFFFF)) + + def xmlRead(self, attrs, content, font): + result = {} + for eName, eAttrs, _eContent in filter(istuple, content): + if eName == "CID": + result[safeEval(eAttrs["cid"])] = eAttrs["glyph"].strip() + return result + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.begintag(name, attrs) + xmlWriter.newline() + for cid, glyph in sorted(value.items()): + if glyph is not None and glyph != 0xFFFF: + xmlWriter.simpletag("CID", cid=cid, glyph=glyph) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + +class GlyphCIDMap(BaseConverter): + def read(self, reader, font, tableDict): + glyphOrder = font.getGlyphOrder() + count = reader.readUShort() + cids = reader.readUShortArray(count) + if count > len(glyphOrder): + log.warning( + "GlyphCIDMap has %d elements, " + "but the font has only %d glyphs; " + "ignoring the rest" % (count, len(glyphOrder)) + ) + result = {} + for glyphID in range(min(len(cids), len(glyphOrder))): + cid = cids[glyphID] + if cid != 0xFFFF: + result[glyphOrder[glyphID]] = cid + return result + + def write(self, writer, font, tableDict, value, repeatIndex=None): + items = { + font.getGlyphID(g): cid + for g, cid in value.items() + if cid is not None and cid != 0xFFFF + } + count = 
max(items) + 1 if items else 0 + writer.writeUShort(count) + for glyphID in range(count): + writer.writeUShort(items.get(glyphID, 0xFFFF)) + + def xmlRead(self, attrs, content, font): + result = {} + for eName, eAttrs, _eContent in filter(istuple, content): + if eName == "CID": + result[eAttrs["glyph"]] = safeEval(eAttrs["value"]) + return result + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.begintag(name, attrs) + xmlWriter.newline() + for glyph, cid in sorted(value.items()): + if cid is not None and cid != 0xFFFF: + xmlWriter.simpletag("CID", glyph=glyph, value=cid) + xmlWriter.newline() + xmlWriter.endtag(name) + xmlWriter.newline() + + +class DeltaValue(BaseConverter): + def read(self, reader, font, tableDict): + StartSize = tableDict["StartSize"] + EndSize = tableDict["EndSize"] + DeltaFormat = tableDict["DeltaFormat"] + assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat" + nItems = EndSize - StartSize + 1 + nBits = 1 << DeltaFormat + minusOffset = 1 << nBits + mask = (1 << nBits) - 1 + signMask = 1 << (nBits - 1) + + DeltaValue = [] + tmp, shift = 0, 0 + for i in range(nItems): + if shift == 0: + tmp, shift = reader.readUShort(), 16 + shift = shift - nBits + value = (tmp >> shift) & mask + if value & signMask: + value = value - minusOffset + DeltaValue.append(value) + return DeltaValue + + def write(self, writer, font, tableDict, value, repeatIndex=None): + StartSize = tableDict["StartSize"] + EndSize = tableDict["EndSize"] + DeltaFormat = tableDict["DeltaFormat"] + DeltaValue = value + assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat" + nItems = EndSize - StartSize + 1 + nBits = 1 << DeltaFormat + assert len(DeltaValue) == nItems + mask = (1 << nBits) - 1 + + tmp, shift = 0, 16 + for value in DeltaValue: + shift = shift - nBits + tmp = tmp | ((value & mask) << shift) + if shift == 0: + writer.writeUShort(tmp) + tmp, shift = 0, 16 + if shift != 16: + writer.writeUShort(tmp) + + def xmlWrite(self, xmlWriter, font, value, 
name, attrs): + xmlWriter.simpletag(name, attrs + [("value", value)]) + xmlWriter.newline() + + def xmlRead(self, attrs, content, font): + return safeEval(attrs["value"]) + + +class VarIdxMapValue(BaseConverter): + def read(self, reader, font, tableDict): + fmt = tableDict["EntryFormat"] + nItems = tableDict["MappingCount"] + + innerBits = 1 + (fmt & 0x000F) + innerMask = (1 << innerBits) - 1 + outerMask = 0xFFFFFFFF - innerMask + outerShift = 16 - innerBits + + entrySize = 1 + ((fmt & 0x0030) >> 4) + readArray = { + 1: reader.readUInt8Array, + 2: reader.readUShortArray, + 3: reader.readUInt24Array, + 4: reader.readULongArray, + }[entrySize] + + return [ + (((raw & outerMask) << outerShift) | (raw & innerMask)) + for raw in readArray(nItems) + ] + + def write(self, writer, font, tableDict, value, repeatIndex=None): + fmt = tableDict["EntryFormat"] + mapping = value + writer["MappingCount"].setValue(len(mapping)) + + innerBits = 1 + (fmt & 0x000F) + innerMask = (1 << innerBits) - 1 + outerShift = 16 - innerBits + + entrySize = 1 + ((fmt & 0x0030) >> 4) + writeArray = { + 1: writer.writeUInt8Array, + 2: writer.writeUShortArray, + 3: writer.writeUInt24Array, + 4: writer.writeULongArray, + }[entrySize] + + writeArray( + [ + (((idx & 0xFFFF0000) >> outerShift) | (idx & innerMask)) + for idx in mapping + ] + ) + + +class VarDataValue(BaseConverter): + def read(self, reader, font, tableDict): + values = [] + + regionCount = tableDict["VarRegionCount"] + wordCount = tableDict["NumShorts"] + + # https://github.com/fonttools/fonttools/issues/2279 + longWords = bool(wordCount & 0x8000) + wordCount = wordCount & 0x7FFF + + if longWords: + readBigArray, readSmallArray = reader.readLongArray, reader.readShortArray + else: + readBigArray, readSmallArray = reader.readShortArray, reader.readInt8Array + + n1, n2 = min(regionCount, wordCount), max(regionCount, wordCount) + values.extend(readBigArray(n1)) + values.extend(readSmallArray(n2 - n1)) + if n2 > regionCount: # Padding + del 
values[regionCount:] + + return values + + def write(self, writer, font, tableDict, values, repeatIndex=None): + regionCount = tableDict["VarRegionCount"] + wordCount = tableDict["NumShorts"] + + # https://github.com/fonttools/fonttools/issues/2279 + longWords = bool(wordCount & 0x8000) + wordCount = wordCount & 0x7FFF + + (writeBigArray, writeSmallArray) = { + False: (writer.writeShortArray, writer.writeInt8Array), + True: (writer.writeLongArray, writer.writeShortArray), + }[longWords] + + n1, n2 = min(regionCount, wordCount), max(regionCount, wordCount) + writeBigArray(values[:n1]) + writeSmallArray(values[n1:regionCount]) + if n2 > regionCount: # Padding + writer.writeSmallArray([0] * (n2 - regionCount)) + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.simpletag(name, attrs + [("value", value)]) + xmlWriter.newline() + + def xmlRead(self, attrs, content, font): + return safeEval(attrs["value"]) + + +class TupleValues: + def read(self, data, font): + return TupleVariation.decompileDeltas_(None, data)[0] + + def write(self, writer, font, tableDict, values, repeatIndex=None): + return bytes(TupleVariation.compileDeltaValues_(values)) + + def xmlRead(self, attrs, content, font): + return safeEval(attrs["value"]) + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.simpletag(name, attrs + [("value", value)]) + xmlWriter.newline() + + +class CFF2Index(BaseConverter): + def __init__( + self, + name, + repeat, + aux, + tableClass=None, + *, + itemClass=None, + itemConverterClass=None, + description="", + ): + BaseConverter.__init__( + self, name, repeat, aux, tableClass, description=description + ) + self._itemClass = itemClass + self._converter = ( + itemConverterClass() if itemConverterClass is not None else None + ) + + def read(self, reader, font, tableDict): + count = reader.readULong() + if count == 0: + return [] + offSize = reader.readUInt8() + + def getReadArray(reader, offSize): + return { + 1: reader.readUInt8Array, 
+ 2: reader.readUShortArray, + 3: reader.readUInt24Array, + 4: reader.readULongArray, + }[offSize] + + readArray = getReadArray(reader, offSize) + + lazy = font.lazy is not False and count > 8 + if not lazy: + offsets = readArray(count + 1) + items = [] + lastOffset = offsets.pop(0) + reader.readData(lastOffset - 1) # In case first offset is not 1 + + for offset in offsets: + assert lastOffset <= offset + item = reader.readData(offset - lastOffset) + + if self._itemClass is not None: + obj = self._itemClass() + obj.decompile(item, font, reader.localState) + item = obj + elif self._converter is not None: + item = self._converter.read(item, font) + + items.append(item) + lastOffset = offset + return items + else: + + def get_read_item(): + reader_copy = reader.copy() + offset_pos = reader.pos + data_pos = offset_pos + (count + 1) * offSize - 1 + readArray = getReadArray(reader_copy, offSize) + + def read_item(i): + reader_copy.seek(offset_pos + i * offSize) + offsets = readArray(2) + reader_copy.seek(data_pos + offsets[0]) + item = reader_copy.readData(offsets[1] - offsets[0]) + + if self._itemClass is not None: + obj = self._itemClass() + obj.decompile(item, font, reader_copy.localState) + item = obj + elif self._converter is not None: + item = self._converter.read(item, font) + return item + + return read_item + + read_item = get_read_item() + l = LazyList([read_item] * count) + + # TODO: Advance reader + + return l + + def write(self, writer, font, tableDict, values, repeatIndex=None): + items = values + + writer.writeULong(len(items)) + if not len(items): + return + + if self._itemClass is not None: + items = [item.compile(font) for item in items] + elif self._converter is not None: + items = [ + self._converter.write(writer, font, tableDict, item, i) + for i, item in enumerate(items) + ] + + offsets = [len(item) for item in items] + offsets = list(accumulate(offsets, initial=1)) + + lastOffset = offsets[-1] + offSize = ( + 1 + if lastOffset < 0x100 + else 2 if 
lastOffset < 0x10000 else 3 if lastOffset < 0x1000000 else 4 + ) + writer.writeUInt8(offSize) + + writeArray = { + 1: writer.writeUInt8Array, + 2: writer.writeUShortArray, + 3: writer.writeUInt24Array, + 4: writer.writeULongArray, + }[offSize] + + writeArray(offsets) + for item in items: + writer.writeData(item) + + def xmlRead(self, attrs, content, font): + if self._itemClass is not None: + obj = self._itemClass() + obj.fromXML(None, attrs, content, font) + return obj + elif self._converter is not None: + return self._converter.xmlRead(attrs, content, font) + else: + raise NotImplementedError() + + def xmlWrite(self, xmlWriter, font, value, name, attrs): + if self._itemClass is not None: + for i, item in enumerate(value): + item.toXML(xmlWriter, font, [("index", i)], name) + elif self._converter is not None: + for i, item in enumerate(value): + self._converter.xmlWrite( + xmlWriter, font, item, name, attrs + [("index", i)] + ) + else: + raise NotImplementedError() + + +class LookupFlag(UShort): + def xmlWrite(self, xmlWriter, font, value, name, attrs): + xmlWriter.simpletag(name, attrs + [("value", value)]) + flags = [] + if value & 0x01: + flags.append("rightToLeft") + if value & 0x02: + flags.append("ignoreBaseGlyphs") + if value & 0x04: + flags.append("ignoreLigatures") + if value & 0x08: + flags.append("ignoreMarks") + if value & 0x10: + flags.append("useMarkFilteringSet") + if value & 0xFF00: + flags.append("markAttachmentType[%i]" % (value >> 8)) + if flags: + xmlWriter.comment(" ".join(flags)) + xmlWriter.newline() + + +class _UInt8Enum(UInt8): + enumClass = NotImplemented + + def read(self, reader, font, tableDict): + return self.enumClass(super().read(reader, font, tableDict)) + + @classmethod + def fromString(cls, value): + return getattr(cls.enumClass, value.upper()) + + @classmethod + def toString(cls, value): + return cls.enumClass(value).name.lower() + + +class ExtendMode(_UInt8Enum): + enumClass = _ExtendMode + + +class CompositeMode(_UInt8Enum): + 
enumClass = _CompositeMode + + +converterMapping = { + # type class + "int8": Int8, + "int16": Short, + "uint8": UInt8, + "uint16": UShort, + "uint24": UInt24, + "uint32": ULong, + "char64": Char64, + "Flags32": Flags32, + "VarIndex": VarIndex, + "Version": Version, + "Tag": Tag, + "GlyphID": GlyphID, + "GlyphID32": GlyphID32, + "NameID": NameID, + "DeciPoints": DeciPoints, + "Fixed": Fixed, + "F2Dot14": F2Dot14, + "Angle": Angle, + "BiasedAngle": BiasedAngle, + "struct": Struct, + "Offset": Table, + "LOffset": LTable, + "Offset24": Table24, + "ValueRecord": ValueRecord, + "DeltaValue": DeltaValue, + "VarIdxMapValue": VarIdxMapValue, + "VarDataValue": VarDataValue, + "LookupFlag": LookupFlag, + "ExtendMode": ExtendMode, + "CompositeMode": CompositeMode, + "STATFlags": STATFlags, + "TupleList": partial(CFF2Index, itemConverterClass=TupleValues), + "VarCompositeGlyphList": partial(CFF2Index, itemClass=VarCompositeGlyph), + # AAT + "CIDGlyphMap": CIDGlyphMap, + "GlyphCIDMap": GlyphCIDMap, + "MortChain": StructWithLength, + "MortSubtable": StructWithLength, + "MorxChain": StructWithLength, + "MorxSubtable": MorxSubtableConverter, + # "Template" types + "AATLookup": lambda C: partial(AATLookup, tableClass=C), + "AATLookupWithDataOffset": lambda C: partial(AATLookupWithDataOffset, tableClass=C), + "STXHeader": lambda C: partial(STXHeader, tableClass=C), + "OffsetTo": lambda C: partial(Table, tableClass=C), + "LOffsetTo": lambda C: partial(LTable, tableClass=C), + "LOffset24To": lambda C: partial(Table24, tableClass=C), +} diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/otData.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/otData.py new file mode 100644 index 0000000000000000000000000000000000000000..3a01f5934f8a7cb547258ed28ca4e9ce651280b4 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/otData.py @@ -0,0 +1,6400 @@ +otData = [ + # + # common + # + ("LookupOrder", []), + ( + 
"ScriptList", + [ + ("uint16", "ScriptCount", None, None, "Number of ScriptRecords"), + ( + "struct", + "ScriptRecord", + "ScriptCount", + 0, + "Array of ScriptRecords -listed alphabetically by ScriptTag", + ), + ], + ), + ( + "ScriptRecord", + [ + ("Tag", "ScriptTag", None, None, "4-byte ScriptTag identifier"), + ( + "Offset", + "Script", + None, + None, + "Offset to Script table-from beginning of ScriptList", + ), + ], + ), + ( + "Script", + [ + ( + "Offset", + "DefaultLangSys", + None, + None, + "Offset to DefaultLangSys table-from beginning of Script table-may be NULL", + ), + ( + "uint16", + "LangSysCount", + None, + None, + "Number of LangSysRecords for this script-excluding the DefaultLangSys", + ), + ( + "struct", + "LangSysRecord", + "LangSysCount", + 0, + "Array of LangSysRecords-listed alphabetically by LangSysTag", + ), + ], + ), + ( + "LangSysRecord", + [ + ("Tag", "LangSysTag", None, None, "4-byte LangSysTag identifier"), + ( + "Offset", + "LangSys", + None, + None, + "Offset to LangSys table-from beginning of Script table", + ), + ], + ), + ( + "LangSys", + [ + ( + "Offset", + "LookupOrder", + None, + None, + "= NULL (reserved for an offset to a reordering table)", + ), + ( + "uint16", + "ReqFeatureIndex", + None, + None, + "Index of a feature required for this language system- if no required features = 0xFFFF", + ), + ( + "uint16", + "FeatureCount", + None, + None, + "Number of FeatureIndex values for this language system-excludes the required feature", + ), + ( + "uint16", + "FeatureIndex", + "FeatureCount", + 0, + "Array of indices into the FeatureList-in arbitrary order", + ), + ], + ), + ( + "FeatureList", + [ + ( + "uint16", + "FeatureCount", + None, + None, + "Number of FeatureRecords in this table", + ), + ( + "struct", + "FeatureRecord", + "FeatureCount", + 0, + "Array of FeatureRecords-zero-based (first feature has FeatureIndex = 0)-listed alphabetically by FeatureTag", + ), + ], + ), + ( + "FeatureRecord", + [ + ("Tag", "FeatureTag", None, 
None, "4-byte feature identification tag"), + ( + "Offset", + "Feature", + None, + None, + "Offset to Feature table-from beginning of FeatureList", + ), + ], + ), + ( + "Feature", + [ + ( + "Offset", + "FeatureParams", + None, + None, + "= NULL (reserved for offset to FeatureParams)", + ), + ( + "uint16", + "LookupCount", + None, + None, + "Number of LookupList indices for this feature", + ), + ( + "uint16", + "LookupListIndex", + "LookupCount", + 0, + "Array of LookupList indices for this feature -zero-based (first lookup is LookupListIndex = 0)", + ), + ], + ), + ("FeatureParams", []), + ( + "FeatureParamsSize", + [ + ( + "DeciPoints", + "DesignSize", + None, + None, + "The design size in 720/inch units (decipoints).", + ), + ( + "uint16", + "SubfamilyID", + None, + None, + "Serves as an identifier that associates fonts in a subfamily.", + ), + ("NameID", "SubfamilyNameID", None, None, "Subfamily NameID."), + ( + "DeciPoints", + "RangeStart", + None, + None, + "Small end of recommended usage range (exclusive) in 720/inch units.", + ), + ( + "DeciPoints", + "RangeEnd", + None, + None, + "Large end of recommended usage range (inclusive) in 720/inch units.", + ), + ], + ), + ( + "FeatureParamsStylisticSet", + [ + ("uint16", "Version", None, None, "Set to 0."), + ("NameID", "UINameID", None, None, "UI NameID."), + ], + ), + ( + "FeatureParamsCharacterVariants", + [ + ("uint16", "Format", None, None, "Set to 0."), + ("NameID", "FeatUILabelNameID", None, None, "Feature UI label NameID."), + ( + "NameID", + "FeatUITooltipTextNameID", + None, + None, + "Feature UI tooltip text NameID.", + ), + ("NameID", "SampleTextNameID", None, None, "Sample text NameID."), + ("uint16", "NumNamedParameters", None, None, "Number of named parameters."), + ( + "NameID", + "FirstParamUILabelNameID", + None, + None, + "First NameID of UI feature parameters.", + ), + ( + "uint16", + "CharCount", + None, + None, + "Count of characters this feature provides glyph variants for.", + ), + ( + 
"uint24", + "Character", + "CharCount", + 0, + "Unicode characters for which this feature provides glyph variants.", + ), + ], + ), + ( + "LookupList", + [ + ("uint16", "LookupCount", None, None, "Number of lookups in this table"), + ( + "Offset", + "Lookup", + "LookupCount", + 0, + "Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)", + ), + ], + ), + ( + "Lookup", + [ + ( + "uint16", + "LookupType", + None, + None, + "Different enumerations for GSUB and GPOS", + ), + ("LookupFlag", "LookupFlag", None, None, "Lookup qualifiers"), + ( + "uint16", + "SubTableCount", + None, + None, + "Number of SubTables for this lookup", + ), + ( + "Offset", + "SubTable", + "SubTableCount", + 0, + "Array of offsets to SubTables-from beginning of Lookup table", + ), + ( + "uint16", + "MarkFilteringSet", + None, + "LookupFlag & 0x0010", + "If set, indicates that the lookup table structure is followed by a MarkFilteringSet field. The layout engine skips over all mark glyphs not in the mark filtering set indicated.", + ), + ], + ), + ( + "CoverageFormat1", + [ + ("uint16", "CoverageFormat", None, None, "Format identifier-format = 1"), + ("uint16", "GlyphCount", None, None, "Number of glyphs in the GlyphArray"), + ( + "GlyphID", + "GlyphArray", + "GlyphCount", + 0, + "Array of GlyphIDs-in numerical order", + ), + ], + ), + ( + "CoverageFormat2", + [ + ("uint16", "CoverageFormat", None, None, "Format identifier-format = 2"), + ("uint16", "RangeCount", None, None, "Number of RangeRecords"), + ( + "struct", + "RangeRecord", + "RangeCount", + 0, + "Array of glyph ranges-ordered by Start GlyphID", + ), + ], + ), + ( + "RangeRecord", + [ + ("GlyphID", "Start", None, None, "First GlyphID in the range"), + ("GlyphID", "End", None, None, "Last GlyphID in the range"), + ( + "uint16", + "StartCoverageIndex", + None, + None, + "Coverage Index of first GlyphID in range", + ), + ], + ), + ( + "ClassDefFormat1", + [ + ("uint16", "ClassFormat", 
None, None, "Format identifier-format = 1"), + ( + "GlyphID", + "StartGlyph", + None, + None, + "First GlyphID of the ClassValueArray", + ), + ("uint16", "GlyphCount", None, None, "Size of the ClassValueArray"), + ( + "uint16", + "ClassValueArray", + "GlyphCount", + 0, + "Array of Class Values-one per GlyphID", + ), + ], + ), + ( + "ClassDefFormat2", + [ + ("uint16", "ClassFormat", None, None, "Format identifier-format = 2"), + ("uint16", "ClassRangeCount", None, None, "Number of ClassRangeRecords"), + ( + "struct", + "ClassRangeRecord", + "ClassRangeCount", + 0, + "Array of ClassRangeRecords-ordered by Start GlyphID", + ), + ], + ), + ( + "ClassRangeRecord", + [ + ("GlyphID", "Start", None, None, "First GlyphID in the range"), + ("GlyphID", "End", None, None, "Last GlyphID in the range"), + ("uint16", "Class", None, None, "Applied to all glyphs in the range"), + ], + ), + ( + "Device", + [ + ("uint16", "StartSize", None, None, "Smallest size to correct-in ppem"), + ("uint16", "EndSize", None, None, "Largest size to correct-in ppem"), + ( + "uint16", + "DeltaFormat", + None, + None, + "Format of DeltaValue array data: 1, 2, or 3", + ), + ( + "DeltaValue", + "DeltaValue", + "", + "DeltaFormat in (1,2,3)", + "Array of compressed data", + ), + ], + ), + # + # gpos + # + ( + "GPOS", + [ + ( + "Version", + "Version", + None, + None, + "Version of the GPOS table- 0x00010000 or 0x00010001", + ), + ( + "Offset", + "ScriptList", + None, + None, + "Offset to ScriptList table-from beginning of GPOS table", + ), + ( + "Offset", + "FeatureList", + None, + None, + "Offset to FeatureList table-from beginning of GPOS table", + ), + ( + "Offset", + "LookupList", + None, + None, + "Offset to LookupList table-from beginning of GPOS table", + ), + ( + "LOffset", + "FeatureVariations", + None, + "Version >= 0x00010001", + "Offset to FeatureVariations table-from beginning of GPOS table", + ), + ], + ), + ( + "SinglePosFormat1", + [ + ("uint16", "PosFormat", None, None, "Format 
identifier-format = 1"), + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table-from beginning of SinglePos subtable", + ), + ( + "uint16", + "ValueFormat", + None, + None, + "Defines the types of data in the ValueRecord", + ), + ( + "ValueRecord", + "Value", + None, + None, + "Defines positioning value(s)-applied to all glyphs in the Coverage table", + ), + ], + ), + ( + "SinglePosFormat2", + [ + ("uint16", "PosFormat", None, None, "Format identifier-format = 2"), + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table-from beginning of SinglePos subtable", + ), + ( + "uint16", + "ValueFormat", + None, + None, + "Defines the types of data in the ValueRecord", + ), + ("uint16", "ValueCount", None, None, "Number of ValueRecords"), + ( + "ValueRecord", + "Value", + "ValueCount", + 0, + "Array of ValueRecords-positioning values applied to glyphs", + ), + ], + ), + ( + "PairPosFormat1", + [ + ("uint16", "PosFormat", None, None, "Format identifier-format = 1"), + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table-from beginning of PairPos subtable-only the first glyph in each pair", + ), + ( + "uint16", + "ValueFormat1", + None, + None, + "Defines the types of data in ValueRecord1-for the first glyph in the pair -may be zero (0)", + ), + ( + "uint16", + "ValueFormat2", + None, + None, + "Defines the types of data in ValueRecord2-for the second glyph in the pair -may be zero (0)", + ), + ("uint16", "PairSetCount", None, None, "Number of PairSet tables"), + ( + "Offset", + "PairSet", + "PairSetCount", + 0, + "Array of offsets to PairSet tables-from beginning of PairPos subtable-ordered by Coverage Index", + ), + ], + ), + ( + "PairSet", + [ + ("uint16", "PairValueCount", None, None, "Number of PairValueRecords"), + ( + "struct", + "PairValueRecord", + "PairValueCount", + 0, + "Array of PairValueRecords-ordered by GlyphID of the second glyph", + ), + ], + ), + ( + "PairValueRecord", + [ + ( + "GlyphID", + 
"SecondGlyph", + None, + None, + "GlyphID of second glyph in the pair-first glyph is listed in the Coverage table", + ), + ( + "ValueRecord", + "Value1", + None, + None, + "Positioning data for the first glyph in the pair", + ), + ( + "ValueRecord", + "Value2", + None, + None, + "Positioning data for the second glyph in the pair", + ), + ], + ), + ( + "PairPosFormat2", + [ + ("uint16", "PosFormat", None, None, "Format identifier-format = 2"), + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table-from beginning of PairPos subtable-for the first glyph of the pair", + ), + ( + "uint16", + "ValueFormat1", + None, + None, + "ValueRecord definition-for the first glyph of the pair-may be zero (0)", + ), + ( + "uint16", + "ValueFormat2", + None, + None, + "ValueRecord definition-for the second glyph of the pair-may be zero (0)", + ), + ( + "Offset", + "ClassDef1", + None, + None, + "Offset to ClassDef table-from beginning of PairPos subtable-for the first glyph of the pair", + ), + ( + "Offset", + "ClassDef2", + None, + None, + "Offset to ClassDef table-from beginning of PairPos subtable-for the second glyph of the pair", + ), + ( + "uint16", + "Class1Count", + None, + None, + "Number of classes in ClassDef1 table-includes Class0", + ), + ( + "uint16", + "Class2Count", + None, + None, + "Number of classes in ClassDef2 table-includes Class0", + ), + ( + "struct", + "Class1Record", + "Class1Count", + 0, + "Array of Class1 records-ordered by Class1", + ), + ], + ), + ( + "Class1Record", + [ + ( + "struct", + "Class2Record", + "Class2Count", + 0, + "Array of Class2 records-ordered by Class2", + ), + ], + ), + ( + "Class2Record", + [ + ( + "ValueRecord", + "Value1", + None, + None, + "Positioning for first glyph-empty if ValueFormat1 = 0", + ), + ( + "ValueRecord", + "Value2", + None, + None, + "Positioning for second glyph-empty if ValueFormat2 = 0", + ), + ], + ), + ( + "CursivePosFormat1", + [ + ("uint16", "PosFormat", None, None, "Format 
identifier-format = 1"), + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table-from beginning of CursivePos subtable", + ), + ("uint16", "EntryExitCount", None, None, "Number of EntryExit records"), + ( + "struct", + "EntryExitRecord", + "EntryExitCount", + 0, + "Array of EntryExit records-in Coverage Index order", + ), + ], + ), + ( + "EntryExitRecord", + [ + ( + "Offset", + "EntryAnchor", + None, + None, + "Offset to EntryAnchor table-from beginning of CursivePos subtable-may be NULL", + ), + ( + "Offset", + "ExitAnchor", + None, + None, + "Offset to ExitAnchor table-from beginning of CursivePos subtable-may be NULL", + ), + ], + ), + ( + "MarkBasePosFormat1", + [ + ("uint16", "PosFormat", None, None, "Format identifier-format = 1"), + ( + "Offset", + "MarkCoverage", + None, + None, + "Offset to MarkCoverage table-from beginning of MarkBasePos subtable", + ), + ( + "Offset", + "BaseCoverage", + None, + None, + "Offset to BaseCoverage table-from beginning of MarkBasePos subtable", + ), + ("uint16", "ClassCount", None, None, "Number of classes defined for marks"), + ( + "Offset", + "MarkArray", + None, + None, + "Offset to MarkArray table-from beginning of MarkBasePos subtable", + ), + ( + "Offset", + "BaseArray", + None, + None, + "Offset to BaseArray table-from beginning of MarkBasePos subtable", + ), + ], + ), + ( + "BaseArray", + [ + ("uint16", "BaseCount", None, None, "Number of BaseRecords"), + ( + "struct", + "BaseRecord", + "BaseCount", + 0, + "Array of BaseRecords-in order of BaseCoverage Index", + ), + ], + ), + ( + "BaseRecord", + [ + ( + "Offset", + "BaseAnchor", + "ClassCount", + 0, + "Array of offsets (one per class) to Anchor tables-from beginning of BaseArray table-ordered by class-zero-based", + ), + ], + ), + ( + "MarkLigPosFormat1", + [ + ("uint16", "PosFormat", None, None, "Format identifier-format = 1"), + ( + "Offset", + "MarkCoverage", + None, + None, + "Offset to Mark Coverage table-from beginning of MarkLigPos subtable", 
+ ), + ( + "Offset", + "LigatureCoverage", + None, + None, + "Offset to Ligature Coverage table-from beginning of MarkLigPos subtable", + ), + ("uint16", "ClassCount", None, None, "Number of defined mark classes"), + ( + "Offset", + "MarkArray", + None, + None, + "Offset to MarkArray table-from beginning of MarkLigPos subtable", + ), + ( + "Offset", + "LigatureArray", + None, + None, + "Offset to LigatureArray table-from beginning of MarkLigPos subtable", + ), + ], + ), + ( + "LigatureArray", + [ + ( + "uint16", + "LigatureCount", + None, + None, + "Number of LigatureAttach table offsets", + ), + ( + "Offset", + "LigatureAttach", + "LigatureCount", + 0, + "Array of offsets to LigatureAttach tables-from beginning of LigatureArray table-ordered by LigatureCoverage Index", + ), + ], + ), + ( + "LigatureAttach", + [ + ( + "uint16", + "ComponentCount", + None, + None, + "Number of ComponentRecords in this ligature", + ), + ( + "struct", + "ComponentRecord", + "ComponentCount", + 0, + "Array of Component records-ordered in writing direction", + ), + ], + ), + ( + "ComponentRecord", + [ + ( + "Offset", + "LigatureAnchor", + "ClassCount", + 0, + "Array of offsets (one per class) to Anchor tables-from beginning of LigatureAttach table-ordered by class-NULL if a component does not have an attachment for a class-zero-based array", + ), + ], + ), + ( + "MarkMarkPosFormat1", + [ + ("uint16", "PosFormat", None, None, "Format identifier-format = 1"), + ( + "Offset", + "Mark1Coverage", + None, + None, + "Offset to Combining Mark Coverage table-from beginning of MarkMarkPos subtable", + ), + ( + "Offset", + "Mark2Coverage", + None, + None, + "Offset to Base Mark Coverage table-from beginning of MarkMarkPos subtable", + ), + ( + "uint16", + "ClassCount", + None, + None, + "Number of Combining Mark classes defined", + ), + ( + "Offset", + "Mark1Array", + None, + None, + "Offset to MarkArray table for Mark1-from beginning of MarkMarkPos subtable", + ), + ( + "Offset", + "Mark2Array", 
+ None, + None, + "Offset to Mark2Array table for Mark2-from beginning of MarkMarkPos subtable", + ), + ], + ), + ( + "Mark2Array", + [ + ("uint16", "Mark2Count", None, None, "Number of Mark2 records"), + ( + "struct", + "Mark2Record", + "Mark2Count", + 0, + "Array of Mark2 records-in Coverage order", + ), + ], + ), + ( + "Mark2Record", + [ + ( + "Offset", + "Mark2Anchor", + "ClassCount", + 0, + "Array of offsets (one per class) to Anchor tables-from beginning of Mark2Array table-zero-based array", + ), + ], + ), + ( + "PosLookupRecord", + [ + ( + "uint16", + "SequenceIndex", + None, + None, + "Index to input glyph sequence-first glyph = 0", + ), + ( + "uint16", + "LookupListIndex", + None, + None, + "Lookup to apply to that position-zero-based", + ), + ], + ), + ( + "ContextPosFormat1", + [ + ("uint16", "PosFormat", None, None, "Format identifier-format = 1"), + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table-from beginning of ContextPos subtable", + ), + ("uint16", "PosRuleSetCount", None, None, "Number of PosRuleSet tables"), + ( + "Offset", + "PosRuleSet", + "PosRuleSetCount", + 0, + "Array of offsets to PosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index", + ), + ], + ), + ( + "PosRuleSet", + [ + ("uint16", "PosRuleCount", None, None, "Number of PosRule tables"), + ( + "Offset", + "PosRule", + "PosRuleCount", + 0, + "Array of offsets to PosRule tables-from beginning of PosRuleSet-ordered by preference", + ), + ], + ), + ( + "PosRule", + [ + ( + "uint16", + "GlyphCount", + None, + None, + "Number of glyphs in the Input glyph sequence", + ), + ("uint16", "PosCount", None, None, "Number of PosLookupRecords"), + ( + "GlyphID", + "Input", + "GlyphCount", + -1, + "Array of input GlyphIDs-starting with the second glyph", + ), + ( + "struct", + "PosLookupRecord", + "PosCount", + 0, + "Array of positioning lookups-in design order", + ), + ], + ), + ( + "ContextPosFormat2", + [ + ("uint16", "PosFormat", None, None, 
"Format identifier-format = 2"), + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table-from beginning of ContextPos subtable", + ), + ( + "Offset", + "ClassDef", + None, + None, + "Offset to ClassDef table-from beginning of ContextPos subtable", + ), + ("uint16", "PosClassSetCount", None, None, "Number of PosClassSet tables"), + ( + "Offset", + "PosClassSet", + "PosClassSetCount", + 0, + "Array of offsets to PosClassSet tables-from beginning of ContextPos subtable-ordered by class-may be NULL", + ), + ], + ), + ( + "PosClassSet", + [ + ( + "uint16", + "PosClassRuleCount", + None, + None, + "Number of PosClassRule tables", + ), + ( + "Offset", + "PosClassRule", + "PosClassRuleCount", + 0, + "Array of offsets to PosClassRule tables-from beginning of PosClassSet-ordered by preference", + ), + ], + ), + ( + "PosClassRule", + [ + ("uint16", "GlyphCount", None, None, "Number of glyphs to be matched"), + ("uint16", "PosCount", None, None, "Number of PosLookupRecords"), + ( + "uint16", + "Class", + "GlyphCount", + -1, + "Array of classes-beginning with the second class-to be matched to the input glyph sequence", + ), + ( + "struct", + "PosLookupRecord", + "PosCount", + 0, + "Array of positioning lookups-in design order", + ), + ], + ), + ( + "ContextPosFormat3", + [ + ("uint16", "PosFormat", None, None, "Format identifier-format = 3"), + ( + "uint16", + "GlyphCount", + None, + None, + "Number of glyphs in the input sequence", + ), + ("uint16", "PosCount", None, None, "Number of PosLookupRecords"), + ( + "Offset", + "Coverage", + "GlyphCount", + 0, + "Array of offsets to Coverage tables-from beginning of ContextPos subtable", + ), + ( + "struct", + "PosLookupRecord", + "PosCount", + 0, + "Array of positioning lookups-in design order", + ), + ], + ), + ( + "ChainContextPosFormat1", + [ + ("uint16", "PosFormat", None, None, "Format identifier-format = 1"), + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table-from beginning of 
ContextPos subtable", + ), + ( + "uint16", + "ChainPosRuleSetCount", + None, + None, + "Number of ChainPosRuleSet tables", + ), + ( + "Offset", + "ChainPosRuleSet", + "ChainPosRuleSetCount", + 0, + "Array of offsets to ChainPosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index", + ), + ], + ), + ( + "ChainPosRuleSet", + [ + ( + "uint16", + "ChainPosRuleCount", + None, + None, + "Number of ChainPosRule tables", + ), + ( + "Offset", + "ChainPosRule", + "ChainPosRuleCount", + 0, + "Array of offsets to ChainPosRule tables-from beginning of ChainPosRuleSet-ordered by preference", + ), + ], + ), + ( + "ChainPosRule", + [ + ( + "uint16", + "BacktrackGlyphCount", + None, + None, + "Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)", + ), + ( + "GlyphID", + "Backtrack", + "BacktrackGlyphCount", + 0, + "Array of backtracking GlyphID's (to be matched before the input sequence)", + ), + ( + "uint16", + "InputGlyphCount", + None, + None, + "Total number of glyphs in the input sequence (includes the first glyph)", + ), + ( + "GlyphID", + "Input", + "InputGlyphCount", + -1, + "Array of input GlyphIDs (start with second glyph)", + ), + ( + "uint16", + "LookAheadGlyphCount", + None, + None, + "Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)", + ), + ( + "GlyphID", + "LookAhead", + "LookAheadGlyphCount", + 0, + "Array of lookahead GlyphID's (to be matched after the input sequence)", + ), + ("uint16", "PosCount", None, None, "Number of PosLookupRecords"), + ( + "struct", + "PosLookupRecord", + "PosCount", + 0, + "Array of PosLookupRecords (in design order)", + ), + ], + ), + ( + "ChainContextPosFormat2", + [ + ("uint16", "PosFormat", None, None, "Format identifier-format = 2"), + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table-from beginning of ChainContextPos subtable", + ), + ( + "Offset", + "BacktrackClassDef", + 
None, + None, + "Offset to ClassDef table containing backtrack sequence context-from beginning of ChainContextPos subtable", + ), + ( + "Offset", + "InputClassDef", + None, + None, + "Offset to ClassDef table containing input sequence context-from beginning of ChainContextPos subtable", + ), + ( + "Offset", + "LookAheadClassDef", + None, + None, + "Offset to ClassDef table containing lookahead sequence context-from beginning of ChainContextPos subtable", + ), + ( + "uint16", + "ChainPosClassSetCount", + None, + None, + "Number of ChainPosClassSet tables", + ), + ( + "Offset", + "ChainPosClassSet", + "ChainPosClassSetCount", + 0, + "Array of offsets to ChainPosClassSet tables-from beginning of ChainContextPos subtable-ordered by input class-may be NULL", + ), + ], + ), + ( + "ChainPosClassSet", + [ + ( + "uint16", + "ChainPosClassRuleCount", + None, + None, + "Number of ChainPosClassRule tables", + ), + ( + "Offset", + "ChainPosClassRule", + "ChainPosClassRuleCount", + 0, + "Array of offsets to ChainPosClassRule tables-from beginning of ChainPosClassSet-ordered by preference", + ), + ], + ), + ( + "ChainPosClassRule", + [ + ( + "uint16", + "BacktrackGlyphCount", + None, + None, + "Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)", + ), + ( + "uint16", + "Backtrack", + "BacktrackGlyphCount", + 0, + "Array of backtracking classes(to be matched before the input sequence)", + ), + ( + "uint16", + "InputGlyphCount", + None, + None, + "Total number of classes in the input sequence (includes the first class)", + ), + ( + "uint16", + "Input", + "InputGlyphCount", + -1, + "Array of input classes(start with second class; to be matched with the input glyph sequence)", + ), + ( + "uint16", + "LookAheadGlyphCount", + None, + None, + "Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)", + ), + ( + "uint16", + "LookAhead", + "LookAheadGlyphCount", + 0, + "Array of 
lookahead classes(to be matched after the input sequence)", + ), + ("uint16", "PosCount", None, None, "Number of PosLookupRecords"), + ( + "struct", + "PosLookupRecord", + "PosCount", + 0, + "Array of PosLookupRecords (in design order)", + ), + ], + ), + ( + "ChainContextPosFormat3", + [ + ("uint16", "PosFormat", None, None, "Format identifier-format = 3"), + ( + "uint16", + "BacktrackGlyphCount", + None, + None, + "Number of glyphs in the backtracking sequence", + ), + ( + "Offset", + "BacktrackCoverage", + "BacktrackGlyphCount", + 0, + "Array of offsets to coverage tables in backtracking sequence, in glyph sequence order", + ), + ( + "uint16", + "InputGlyphCount", + None, + None, + "Number of glyphs in input sequence", + ), + ( + "Offset", + "InputCoverage", + "InputGlyphCount", + 0, + "Array of offsets to coverage tables in input sequence, in glyph sequence order", + ), + ( + "uint16", + "LookAheadGlyphCount", + None, + None, + "Number of glyphs in lookahead sequence", + ), + ( + "Offset", + "LookAheadCoverage", + "LookAheadGlyphCount", + 0, + "Array of offsets to coverage tables in lookahead sequence, in glyph sequence order", + ), + ("uint16", "PosCount", None, None, "Number of PosLookupRecords"), + ( + "struct", + "PosLookupRecord", + "PosCount", + 0, + "Array of PosLookupRecords,in design order", + ), + ], + ), + ( + "ExtensionPosFormat1", + [ + ("uint16", "ExtFormat", None, None, "Format identifier. Set to 1."), + ( + "uint16", + "ExtensionLookupType", + None, + None, + "Lookup type of subtable referenced by ExtensionOffset (i.e. 
the extension subtable).", + ), + ("LOffset", "ExtSubTable", None, None, "Offset to SubTable"), + ], + ), + # ('ValueRecord', [ + # ('int16', 'XPlacement', None, None, 'Horizontal adjustment for placement-in design units'), + # ('int16', 'YPlacement', None, None, 'Vertical adjustment for placement-in design units'), + # ('int16', 'XAdvance', None, None, 'Horizontal adjustment for advance-in design units (only used for horizontal writing)'), + # ('int16', 'YAdvance', None, None, 'Vertical adjustment for advance-in design units (only used for vertical writing)'), + # ('Offset', 'XPlaDevice', None, None, 'Offset to Device table for horizontal placement-measured from beginning of PosTable (may be NULL)'), + # ('Offset', 'YPlaDevice', None, None, 'Offset to Device table for vertical placement-measured from beginning of PosTable (may be NULL)'), + # ('Offset', 'XAdvDevice', None, None, 'Offset to Device table for horizontal advance-measured from beginning of PosTable (may be NULL)'), + # ('Offset', 'YAdvDevice', None, None, 'Offset to Device table for vertical advance-measured from beginning of PosTable (may be NULL)'), + # ]), + ( + "AnchorFormat1", + [ + ("uint16", "AnchorFormat", None, None, "Format identifier-format = 1"), + ("int16", "XCoordinate", None, None, "Horizontal value-in design units"), + ("int16", "YCoordinate", None, None, "Vertical value-in design units"), + ], + ), + ( + "AnchorFormat2", + [ + ("uint16", "AnchorFormat", None, None, "Format identifier-format = 2"), + ("int16", "XCoordinate", None, None, "Horizontal value-in design units"), + ("int16", "YCoordinate", None, None, "Vertical value-in design units"), + ("uint16", "AnchorPoint", None, None, "Index to glyph contour point"), + ], + ), + ( + "AnchorFormat3", + [ + ("uint16", "AnchorFormat", None, None, "Format identifier-format = 3"), + ("int16", "XCoordinate", None, None, "Horizontal value-in design units"), + ("int16", "YCoordinate", None, None, "Vertical value-in design units"), + ( + 
"Offset", + "XDeviceTable", + None, + None, + "Offset to Device table for X coordinate- from beginning of Anchor table (may be NULL)", + ), + ( + "Offset", + "YDeviceTable", + None, + None, + "Offset to Device table for Y coordinate- from beginning of Anchor table (may be NULL)", + ), + ], + ), + ( + "MarkArray", + [ + ("uint16", "MarkCount", None, None, "Number of MarkRecords"), + ( + "struct", + "MarkRecord", + "MarkCount", + 0, + "Array of MarkRecords-in Coverage order", + ), + ], + ), + ( + "MarkRecord", + [ + ("uint16", "Class", None, None, "Class defined for this mark"), + ( + "Offset", + "MarkAnchor", + None, + None, + "Offset to Anchor table-from beginning of MarkArray table", + ), + ], + ), + # + # gsub + # + ( + "GSUB", + [ + ( + "Version", + "Version", + None, + None, + "Version of the GSUB table- 0x00010000 or 0x00010001", + ), + ( + "Offset", + "ScriptList", + None, + None, + "Offset to ScriptList table-from beginning of GSUB table", + ), + ( + "Offset", + "FeatureList", + None, + None, + "Offset to FeatureList table-from beginning of GSUB table", + ), + ( + "Offset", + "LookupList", + None, + None, + "Offset to LookupList table-from beginning of GSUB table", + ), + ( + "LOffset", + "FeatureVariations", + None, + "Version >= 0x00010001", + "Offset to FeatureVariations table-from beginning of GSUB table", + ), + ], + ), + ( + "SingleSubstFormat1", + [ + ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"), + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table-from beginning of Substitution table", + ), + ( + "uint16", + "DeltaGlyphID", + None, + None, + "Add to original GlyphID modulo 65536 to get substitute GlyphID", + ), + ], + ), + ( + "SingleSubstFormat2", + [ + ("uint16", "SubstFormat", None, None, "Format identifier-format = 2"), + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table-from beginning of Substitution table", + ), + ( + "uint16", + "GlyphCount", + None, + None, + "Number of 
GlyphIDs in the Substitute array", + ), + ( + "GlyphID", + "Substitute", + "GlyphCount", + 0, + "Array of substitute GlyphIDs-ordered by Coverage Index", + ), + ], + ), + ( + "MultipleSubstFormat1", + [ + ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"), + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table-from beginning of Substitution table", + ), + ( + "uint16", + "SequenceCount", + None, + None, + "Number of Sequence table offsets in the Sequence array", + ), + ( + "Offset", + "Sequence", + "SequenceCount", + 0, + "Array of offsets to Sequence tables-from beginning of Substitution table-ordered by Coverage Index", + ), + ], + ), + ( + "Sequence", + [ + ( + "uint16", + "GlyphCount", + None, + None, + "Number of GlyphIDs in the Substitute array. This should always be greater than 0.", + ), + ( + "GlyphID", + "Substitute", + "GlyphCount", + 0, + "String of GlyphIDs to substitute", + ), + ], + ), + ( + "AlternateSubstFormat1", + [ + ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"), + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table-from beginning of Substitution table", + ), + ( + "uint16", + "AlternateSetCount", + None, + None, + "Number of AlternateSet tables", + ), + ( + "Offset", + "AlternateSet", + "AlternateSetCount", + 0, + "Array of offsets to AlternateSet tables-from beginning of Substitution table-ordered by Coverage Index", + ), + ], + ), + ( + "AlternateSet", + [ + ( + "uint16", + "GlyphCount", + None, + None, + "Number of GlyphIDs in the Alternate array", + ), + ( + "GlyphID", + "Alternate", + "GlyphCount", + 0, + "Array of alternate GlyphIDs-in arbitrary order", + ), + ], + ), + ( + "LigatureSubstFormat1", + [ + ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"), + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table-from beginning of Substitution table", + ), + ("uint16", "LigSetCount", None, None, "Number of LigatureSet 
tables"), + ( + "Offset", + "LigatureSet", + "LigSetCount", + 0, + "Array of offsets to LigatureSet tables-from beginning of Substitution table-ordered by Coverage Index", + ), + ], + ), + ( + "LigatureSet", + [ + ("uint16", "LigatureCount", None, None, "Number of Ligature tables"), + ( + "Offset", + "Ligature", + "LigatureCount", + 0, + "Array of offsets to Ligature tables-from beginning of LigatureSet table-ordered by preference", + ), + ], + ), + ( + "Ligature", + [ + ("GlyphID", "LigGlyph", None, None, "GlyphID of ligature to substitute"), + ("uint16", "CompCount", None, None, "Number of components in the ligature"), + ( + "GlyphID", + "Component", + "CompCount", + -1, + "Array of component GlyphIDs-start with the second component-ordered in writing direction", + ), + ], + ), + ( + "SubstLookupRecord", + [ + ( + "uint16", + "SequenceIndex", + None, + None, + "Index into current glyph sequence-first glyph = 0", + ), + ( + "uint16", + "LookupListIndex", + None, + None, + "Lookup to apply to that position-zero-based", + ), + ], + ), + ( + "ContextSubstFormat1", + [ + ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"), + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table-from beginning of Substitution table", + ), + ( + "uint16", + "SubRuleSetCount", + None, + None, + "Number of SubRuleSet tables-must equal GlyphCount in Coverage table", + ), + ( + "Offset", + "SubRuleSet", + "SubRuleSetCount", + 0, + "Array of offsets to SubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index", + ), + ], + ), + ( + "SubRuleSet", + [ + ("uint16", "SubRuleCount", None, None, "Number of SubRule tables"), + ( + "Offset", + "SubRule", + "SubRuleCount", + 0, + "Array of offsets to SubRule tables-from beginning of SubRuleSet table-ordered by preference", + ), + ], + ), + ( + "SubRule", + [ + ( + "uint16", + "GlyphCount", + None, + None, + "Total number of glyphs in input glyph sequence-includes the first glyph", + ), + 
("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"), + ( + "GlyphID", + "Input", + "GlyphCount", + -1, + "Array of input GlyphIDs-start with second glyph", + ), + ( + "struct", + "SubstLookupRecord", + "SubstCount", + 0, + "Array of SubstLookupRecords-in design order", + ), + ], + ), + ( + "ContextSubstFormat2", + [ + ("uint16", "SubstFormat", None, None, "Format identifier-format = 2"), + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table-from beginning of Substitution table", + ), + ( + "Offset", + "ClassDef", + None, + None, + "Offset to glyph ClassDef table-from beginning of Substitution table", + ), + ("uint16", "SubClassSetCount", None, None, "Number of SubClassSet tables"), + ( + "Offset", + "SubClassSet", + "SubClassSetCount", + 0, + "Array of offsets to SubClassSet tables-from beginning of Substitution table-ordered by class-may be NULL", + ), + ], + ), + ( + "SubClassSet", + [ + ( + "uint16", + "SubClassRuleCount", + None, + None, + "Number of SubClassRule tables", + ), + ( + "Offset", + "SubClassRule", + "SubClassRuleCount", + 0, + "Array of offsets to SubClassRule tables-from beginning of SubClassSet-ordered by preference", + ), + ], + ), + ( + "SubClassRule", + [ + ( + "uint16", + "GlyphCount", + None, + None, + "Total number of classes specified for the context in the rule-includes the first class", + ), + ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"), + ( + "uint16", + "Class", + "GlyphCount", + -1, + "Array of classes-beginning with the second class-to be matched to the input glyph class sequence", + ), + ( + "struct", + "SubstLookupRecord", + "SubstCount", + 0, + "Array of Substitution lookups-in design order", + ), + ], + ), + ( + "ContextSubstFormat3", + [ + ("uint16", "SubstFormat", None, None, "Format identifier-format = 3"), + ( + "uint16", + "GlyphCount", + None, + None, + "Number of glyphs in the input glyph sequence", + ), + ("uint16", "SubstCount", None, None, "Number of 
SubstLookupRecords"), + ( + "Offset", + "Coverage", + "GlyphCount", + 0, + "Array of offsets to Coverage table-from beginning of Substitution table-in glyph sequence order", + ), + ( + "struct", + "SubstLookupRecord", + "SubstCount", + 0, + "Array of SubstLookupRecords-in design order", + ), + ], + ), + ( + "ChainContextSubstFormat1", + [ + ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"), + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table-from beginning of Substitution table", + ), + ( + "uint16", + "ChainSubRuleSetCount", + None, + None, + "Number of ChainSubRuleSet tables-must equal GlyphCount in Coverage table", + ), + ( + "Offset", + "ChainSubRuleSet", + "ChainSubRuleSetCount", + 0, + "Array of offsets to ChainSubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index", + ), + ], + ), + ( + "ChainSubRuleSet", + [ + ( + "uint16", + "ChainSubRuleCount", + None, + None, + "Number of ChainSubRule tables", + ), + ( + "Offset", + "ChainSubRule", + "ChainSubRuleCount", + 0, + "Array of offsets to ChainSubRule tables-from beginning of ChainSubRuleSet table-ordered by preference", + ), + ], + ), + ( + "ChainSubRule", + [ + ( + "uint16", + "BacktrackGlyphCount", + None, + None, + "Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)", + ), + ( + "GlyphID", + "Backtrack", + "BacktrackGlyphCount", + 0, + "Array of backtracking GlyphID's (to be matched before the input sequence)", + ), + ( + "uint16", + "InputGlyphCount", + None, + None, + "Total number of glyphs in the input sequence (includes the first glyph)", + ), + ( + "GlyphID", + "Input", + "InputGlyphCount", + -1, + "Array of input GlyphIDs (start with second glyph)", + ), + ( + "uint16", + "LookAheadGlyphCount", + None, + None, + "Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)", + ), + ( + "GlyphID", + "LookAhead", + 
"LookAheadGlyphCount", + 0, + "Array of lookahead GlyphID's (to be matched after the input sequence)", + ), + ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"), + ( + "struct", + "SubstLookupRecord", + "SubstCount", + 0, + "Array of SubstLookupRecords (in design order)", + ), + ], + ), + ( + "ChainContextSubstFormat2", + [ + ("uint16", "SubstFormat", None, None, "Format identifier-format = 2"), + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table-from beginning of Substitution table", + ), + ( + "Offset", + "BacktrackClassDef", + None, + None, + "Offset to glyph ClassDef table containing backtrack sequence data-from beginning of Substitution table", + ), + ( + "Offset", + "InputClassDef", + None, + None, + "Offset to glyph ClassDef table containing input sequence data-from beginning of Substitution table", + ), + ( + "Offset", + "LookAheadClassDef", + None, + None, + "Offset to glyph ClassDef table containing lookahead sequence data-from beginning of Substitution table", + ), + ( + "uint16", + "ChainSubClassSetCount", + None, + None, + "Number of ChainSubClassSet tables", + ), + ( + "Offset", + "ChainSubClassSet", + "ChainSubClassSetCount", + 0, + "Array of offsets to ChainSubClassSet tables-from beginning of Substitution table-ordered by input class-may be NULL", + ), + ], + ), + ( + "ChainSubClassSet", + [ + ( + "uint16", + "ChainSubClassRuleCount", + None, + None, + "Number of ChainSubClassRule tables", + ), + ( + "Offset", + "ChainSubClassRule", + "ChainSubClassRuleCount", + 0, + "Array of offsets to ChainSubClassRule tables-from beginning of ChainSubClassSet-ordered by preference", + ), + ], + ), + ( + "ChainSubClassRule", + [ + ( + "uint16", + "BacktrackGlyphCount", + None, + None, + "Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)", + ), + ( + "uint16", + "Backtrack", + "BacktrackGlyphCount", + 0, + "Array of backtracking classes(to be matched before the input 
sequence)", + ), + ( + "uint16", + "InputGlyphCount", + None, + None, + "Total number of classes in the input sequence (includes the first class)", + ), + ( + "uint16", + "Input", + "InputGlyphCount", + -1, + "Array of input classes(start with second class; to be matched with the input glyph sequence)", + ), + ( + "uint16", + "LookAheadGlyphCount", + None, + None, + "Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)", + ), + ( + "uint16", + "LookAhead", + "LookAheadGlyphCount", + 0, + "Array of lookahead classes(to be matched after the input sequence)", + ), + ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"), + ( + "struct", + "SubstLookupRecord", + "SubstCount", + 0, + "Array of SubstLookupRecords (in design order)", + ), + ], + ), + ( + "ChainContextSubstFormat3", + [ + ("uint16", "SubstFormat", None, None, "Format identifier-format = 3"), + ( + "uint16", + "BacktrackGlyphCount", + None, + None, + "Number of glyphs in the backtracking sequence", + ), + ( + "Offset", + "BacktrackCoverage", + "BacktrackGlyphCount", + 0, + "Array of offsets to coverage tables in backtracking sequence, in glyph sequence order", + ), + ( + "uint16", + "InputGlyphCount", + None, + None, + "Number of glyphs in input sequence", + ), + ( + "Offset", + "InputCoverage", + "InputGlyphCount", + 0, + "Array of offsets to coverage tables in input sequence, in glyph sequence order", + ), + ( + "uint16", + "LookAheadGlyphCount", + None, + None, + "Number of glyphs in lookahead sequence", + ), + ( + "Offset", + "LookAheadCoverage", + "LookAheadGlyphCount", + 0, + "Array of offsets to coverage tables in lookahead sequence, in glyph sequence order", + ), + ("uint16", "SubstCount", None, None, "Number of SubstLookupRecords"), + ( + "struct", + "SubstLookupRecord", + "SubstCount", + 0, + "Array of SubstLookupRecords, in design order", + ), + ], + ), + ( + "ExtensionSubstFormat1", + [ + ("uint16", "ExtFormat", None, None, 
"Format identifier. Set to 1."), + ( + "uint16", + "ExtensionLookupType", + None, + None, + "Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).", + ), + ( + "LOffset", + "ExtSubTable", + None, + None, + "Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)", + ), + ], + ), + ( + "ReverseChainSingleSubstFormat1", + [ + ("uint16", "SubstFormat", None, None, "Format identifier-format = 1"), + ( + "Offset", + "Coverage", + None, + 0, + "Offset to Coverage table - from beginning of Substitution table", + ), + ( + "uint16", + "BacktrackGlyphCount", + None, + None, + "Number of glyphs in the backtracking sequence", + ), + ( + "Offset", + "BacktrackCoverage", + "BacktrackGlyphCount", + 0, + "Array of offsets to coverage tables in backtracking sequence, in glyph sequence order", + ), + ( + "uint16", + "LookAheadGlyphCount", + None, + None, + "Number of glyphs in lookahead sequence", + ), + ( + "Offset", + "LookAheadCoverage", + "LookAheadGlyphCount", + 0, + "Array of offsets to coverage tables in lookahead sequence, in glyph sequence order", + ), + ( + "uint16", + "GlyphCount", + None, + None, + "Number of GlyphIDs in the Substitute array", + ), + ( + "GlyphID", + "Substitute", + "GlyphCount", + 0, + "Array of substitute GlyphIDs-ordered by Coverage index", + ), + ], + ), + # + # gdef + # + ( + "GDEF", + [ + ( + "Version", + "Version", + None, + None, + "Version of the GDEF table- 0x00010000, 0x00010002, or 0x00010003", + ), + ( + "Offset", + "GlyphClassDef", + None, + None, + "Offset to class definition table for glyph type-from beginning of GDEF header (may be NULL)", + ), + ( + "Offset", + "AttachList", + None, + None, + "Offset to list of glyphs with attachment points-from beginning of GDEF header (may be NULL)", + ), + ( + "Offset", + "LigCaretList", + None, + None, + "Offset to list of positioning points for ligature carets-from beginning of GDEF header (may be NULL)", + ), + ( 
+ "Offset", + "MarkAttachClassDef", + None, + None, + "Offset to class definition table for mark attachment type-from beginning of GDEF header (may be NULL)", + ), + ( + "Offset", + "MarkGlyphSetsDef", + None, + "Version >= 0x00010002", + "Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)", + ), + ( + "LOffset", + "VarStore", + None, + "Version >= 0x00010003", + "Offset to variation store (may be NULL)", + ), + ], + ), + ( + "AttachList", + [ + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table - from beginning of AttachList table", + ), + ( + "uint16", + "GlyphCount", + None, + None, + "Number of glyphs with attachment points", + ), + ( + "Offset", + "AttachPoint", + "GlyphCount", + 0, + "Array of offsets to AttachPoint tables-from beginning of AttachList table-in Coverage Index order", + ), + ], + ), + ( + "AttachPoint", + [ + ( + "uint16", + "PointCount", + None, + None, + "Number of attachment points on this glyph", + ), + ( + "uint16", + "PointIndex", + "PointCount", + 0, + "Array of contour point indices -in increasing numerical order", + ), + ], + ), + ( + "LigCaretList", + [ + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table - from beginning of LigCaretList table", + ), + ("uint16", "LigGlyphCount", None, None, "Number of ligature glyphs"), + ( + "Offset", + "LigGlyph", + "LigGlyphCount", + 0, + "Array of offsets to LigGlyph tables-from beginning of LigCaretList table-in Coverage Index order", + ), + ], + ), + ( + "LigGlyph", + [ + ( + "uint16", + "CaretCount", + None, + None, + "Number of CaretValues for this ligature (components - 1)", + ), + ( + "Offset", + "CaretValue", + "CaretCount", + 0, + "Array of offsets to CaretValue tables-from beginning of LigGlyph table-in increasing coordinate order", + ), + ], + ), + ( + "CaretValueFormat1", + [ + ("uint16", "CaretValueFormat", None, None, "Format identifier-format = 1"), + ("int16", "Coordinate", None, None, "X or Y value, in 
design units"), + ], + ), + ( + "CaretValueFormat2", + [ + ("uint16", "CaretValueFormat", None, None, "Format identifier-format = 2"), + ("uint16", "CaretValuePoint", None, None, "Contour point index on glyph"), + ], + ), + ( + "CaretValueFormat3", + [ + ("uint16", "CaretValueFormat", None, None, "Format identifier-format = 3"), + ("int16", "Coordinate", None, None, "X or Y value, in design units"), + ( + "Offset", + "DeviceTable", + None, + None, + "Offset to Device table for X or Y value-from beginning of CaretValue table", + ), + ], + ), + ( + "MarkGlyphSetsDef", + [ + ("uint16", "MarkSetTableFormat", None, None, "Format identifier == 1"), + ("uint16", "MarkSetCount", None, None, "Number of mark sets defined"), + ( + "LOffset", + "Coverage", + "MarkSetCount", + 0, + "Array of offsets to mark set coverage tables.", + ), + ], + ), + # + # base + # + ( + "BASE", + [ + ( + "Version", + "Version", + None, + None, + "Version of the BASE table-initially 0x00010000", + ), + ( + "Offset", + "HorizAxis", + None, + None, + "Offset to horizontal Axis table-from beginning of BASE table-may be NULL", + ), + ( + "Offset", + "VertAxis", + None, + None, + "Offset to vertical Axis table-from beginning of BASE table-may be NULL", + ), + ( + "LOffset", + "VarStore", + None, + "Version >= 0x00010001", + "Offset to variation store (may be NULL)", + ), + ], + ), + ( + "Axis", + [ + ( + "Offset", + "BaseTagList", + None, + None, + "Offset to BaseTagList table-from beginning of Axis table-may be NULL", + ), + ( + "Offset", + "BaseScriptList", + None, + None, + "Offset to BaseScriptList table-from beginning of Axis table", + ), + ], + ), + ( + "BaseTagList", + [ + ( + "uint16", + "BaseTagCount", + None, + None, + "Number of baseline identification tags in this text direction-may be zero (0)", + ), + ( + "Tag", + "BaselineTag", + "BaseTagCount", + 0, + "Array of 4-byte baseline identification tags-must be in alphabetical order", + ), + ], + ), + ( + "BaseScriptList", + [ + ( + "uint16", + 
"BaseScriptCount", + None, + None, + "Number of BaseScriptRecords defined", + ), + ( + "struct", + "BaseScriptRecord", + "BaseScriptCount", + 0, + "Array of BaseScriptRecords-in alphabetical order by BaseScriptTag", + ), + ], + ), + ( + "BaseScriptRecord", + [ + ("Tag", "BaseScriptTag", None, None, "4-byte script identification tag"), + ( + "Offset", + "BaseScript", + None, + None, + "Offset to BaseScript table-from beginning of BaseScriptList", + ), + ], + ), + ( + "BaseScript", + [ + ( + "Offset", + "BaseValues", + None, + None, + "Offset to BaseValues table-from beginning of BaseScript table-may be NULL", + ), + ( + "Offset", + "DefaultMinMax", + None, + None, + "Offset to MinMax table- from beginning of BaseScript table-may be NULL", + ), + ( + "uint16", + "BaseLangSysCount", + None, + None, + "Number of BaseLangSysRecords defined-may be zero (0)", + ), + ( + "struct", + "BaseLangSysRecord", + "BaseLangSysCount", + 0, + "Array of BaseLangSysRecords-in alphabetical order by BaseLangSysTag", + ), + ], + ), + ( + "BaseLangSysRecord", + [ + ( + "Tag", + "BaseLangSysTag", + None, + None, + "4-byte language system identification tag", + ), + ( + "Offset", + "MinMax", + None, + None, + "Offset to MinMax table-from beginning of BaseScript table", + ), + ], + ), + ( + "BaseValues", + [ + ( + "uint16", + "DefaultIndex", + None, + None, + "Index number of default baseline for this script-equals index position of baseline tag in BaselineArray of the BaseTagList", + ), + ( + "uint16", + "BaseCoordCount", + None, + None, + "Number of BaseCoord tables defined-should equal BaseTagCount in the BaseTagList", + ), + ( + "Offset", + "BaseCoord", + "BaseCoordCount", + 0, + "Array of offsets to BaseCoord-from beginning of BaseValues table-order matches BaselineTag array in the BaseTagList", + ), + ], + ), + ( + "MinMax", + [ + ( + "Offset", + "MinCoord", + None, + None, + "Offset to BaseCoord table-defines minimum extent value-from the beginning of MinMax table-may be NULL", + ), + 
( + "Offset", + "MaxCoord", + None, + None, + "Offset to BaseCoord table-defines maximum extent value-from the beginning of MinMax table-may be NULL", + ), + ( + "uint16", + "FeatMinMaxCount", + None, + None, + "Number of FeatMinMaxRecords-may be zero (0)", + ), + ( + "struct", + "FeatMinMaxRecord", + "FeatMinMaxCount", + 0, + "Array of FeatMinMaxRecords-in alphabetical order, by FeatureTableTag", + ), + ], + ), + ( + "FeatMinMaxRecord", + [ + ( + "Tag", + "FeatureTableTag", + None, + None, + "4-byte feature identification tag-must match FeatureTag in FeatureList", + ), + ( + "Offset", + "MinCoord", + None, + None, + "Offset to BaseCoord table-defines minimum extent value-from beginning of MinMax table-may be NULL", + ), + ( + "Offset", + "MaxCoord", + None, + None, + "Offset to BaseCoord table-defines maximum extent value-from beginning of MinMax table-may be NULL", + ), + ], + ), + ( + "BaseCoordFormat1", + [ + ("uint16", "BaseCoordFormat", None, None, "Format identifier-format = 1"), + ("int16", "Coordinate", None, None, "X or Y value, in design units"), + ], + ), + ( + "BaseCoordFormat2", + [ + ("uint16", "BaseCoordFormat", None, None, "Format identifier-format = 2"), + ("int16", "Coordinate", None, None, "X or Y value, in design units"), + ("GlyphID", "ReferenceGlyph", None, None, "GlyphID of control glyph"), + ( + "uint16", + "BaseCoordPoint", + None, + None, + "Index of contour point on the ReferenceGlyph", + ), + ], + ), + ( + "BaseCoordFormat3", + [ + ("uint16", "BaseCoordFormat", None, None, "Format identifier-format = 3"), + ("int16", "Coordinate", None, None, "X or Y value, in design units"), + ( + "Offset", + "DeviceTable", + None, + None, + "Offset to Device table for X or Y value", + ), + ], + ), + # + # jstf + # + ( + "JSTF", + [ + ( + "Version", + "Version", + None, + None, + "Version of the JSTF table-initially set to 0x00010000", + ), + ( + "uint16", + "JstfScriptCount", + None, + None, + "Number of JstfScriptRecords in this table", + ), + ( + 
"struct", + "JstfScriptRecord", + "JstfScriptCount", + 0, + "Array of JstfScriptRecords-in alphabetical order, by JstfScriptTag", + ), + ], + ), + ( + "JstfScriptRecord", + [ + ("Tag", "JstfScriptTag", None, None, "4-byte JstfScript identification"), + ( + "Offset", + "JstfScript", + None, + None, + "Offset to JstfScript table-from beginning of JSTF Header", + ), + ], + ), + ( + "JstfScript", + [ + ( + "Offset", + "ExtenderGlyph", + None, + None, + "Offset to ExtenderGlyph table-from beginning of JstfScript table-may be NULL", + ), + ( + "Offset", + "DefJstfLangSys", + None, + None, + "Offset to Default JstfLangSys table-from beginning of JstfScript table-may be NULL", + ), + ( + "uint16", + "JstfLangSysCount", + None, + None, + "Number of JstfLangSysRecords in this table- may be zero (0)", + ), + ( + "struct", + "JstfLangSysRecord", + "JstfLangSysCount", + 0, + "Array of JstfLangSysRecords-in alphabetical order, by JstfLangSysTag", + ), + ], + ), + ( + "JstfLangSysRecord", + [ + ("Tag", "JstfLangSysTag", None, None, "4-byte JstfLangSys identifier"), + ( + "Offset", + "JstfLangSys", + None, + None, + "Offset to JstfLangSys table-from beginning of JstfScript table", + ), + ], + ), + ( + "ExtenderGlyph", + [ + ( + "uint16", + "GlyphCount", + None, + None, + "Number of Extender Glyphs in this script", + ), + ( + "GlyphID", + "ExtenderGlyph", + "GlyphCount", + 0, + "GlyphIDs-in increasing numerical order", + ), + ], + ), + ( + "JstfLangSys", + [ + ( + "uint16", + "JstfPriorityCount", + None, + None, + "Number of JstfPriority tables", + ), + ( + "Offset", + "JstfPriority", + "JstfPriorityCount", + 0, + "Array of offsets to JstfPriority tables-from beginning of JstfLangSys table-in priority order", + ), + ], + ), + ( + "JstfPriority", + [ + ( + "Offset", + "ShrinkageEnableGSUB", + None, + None, + "Offset to Shrinkage Enable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL", + ), + ( + "Offset", + "ShrinkageDisableGSUB", + None, + None, + "Offset to 
Shrinkage Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL", + ), + ( + "Offset", + "ShrinkageEnableGPOS", + None, + None, + "Offset to Shrinkage Enable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL", + ), + ( + "Offset", + "ShrinkageDisableGPOS", + None, + None, + "Offset to Shrinkage Disable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL", + ), + ( + "Offset", + "ShrinkageJstfMax", + None, + None, + "Offset to Shrinkage JstfMax table-from beginning of JstfPriority table -may be NULL", + ), + ( + "Offset", + "ExtensionEnableGSUB", + None, + None, + "Offset to Extension Enable JstfGSUBModList table-may be NULL", + ), + ( + "Offset", + "ExtensionDisableGSUB", + None, + None, + "Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL", + ), + ( + "Offset", + "ExtensionEnableGPOS", + None, + None, + "Offset to Extension Enable JstfGSUBModList table-may be NULL", + ), + ( + "Offset", + "ExtensionDisableGPOS", + None, + None, + "Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL", + ), + ( + "Offset", + "ExtensionJstfMax", + None, + None, + "Offset to Extension JstfMax table-from beginning of JstfPriority table -may be NULL", + ), + ], + ), + ( + "JstfGSUBModList", + [ + ( + "uint16", + "LookupCount", + None, + None, + "Number of lookups for this modification", + ), + ( + "uint16", + "GSUBLookupIndex", + "LookupCount", + 0, + "Array of LookupIndex identifiers in GSUB-in increasing numerical order", + ), + ], + ), + ( + "JstfGPOSModList", + [ + ( + "uint16", + "LookupCount", + None, + None, + "Number of lookups for this modification", + ), + ( + "uint16", + "GPOSLookupIndex", + "LookupCount", + 0, + "Array of LookupIndex identifiers in GPOS-in increasing numerical order", + ), + ], + ), + ( + "JstfMax", + [ + ( + "uint16", + "LookupCount", + None, + None, + "Number of lookup Indices for this 
modification", + ), + ( + "Offset", + "Lookup", + "LookupCount", + 0, + "Array of offsets to GPOS-type lookup tables-from beginning of JstfMax table-in design order", + ), + ], + ), + # + # STAT + # + ( + "STAT", + [ + ( + "Version", + "Version", + None, + None, + "Version of the table-initially set to 0x00010000, currently 0x00010002.", + ), + ( + "uint16", + "DesignAxisRecordSize", + None, + None, + "Size in bytes of each design axis record", + ), + ("uint16", "DesignAxisCount", None, None, "Number of design axis records"), + ( + "LOffsetTo(AxisRecordArray)", + "DesignAxisRecord", + None, + None, + "Offset in bytes from the beginning of the STAT table to the start of the design axes array", + ), + ("uint16", "AxisValueCount", None, None, "Number of axis value tables"), + ( + "LOffsetTo(AxisValueArray)", + "AxisValueArray", + None, + None, + "Offset in bytes from the beginning of the STAT table to the start of the axes value offset array", + ), + ( + "NameID", + "ElidedFallbackNameID", + None, + "Version >= 0x00010001", + "NameID to use when all style attributes are elided.", + ), + ], + ), + ( + "AxisRecordArray", + [ + ("AxisRecord", "Axis", "DesignAxisCount", 0, "Axis records"), + ], + ), + ( + "AxisRecord", + [ + ( + "Tag", + "AxisTag", + None, + None, + "A tag identifying the axis of design variation", + ), + ( + "NameID", + "AxisNameID", + None, + None, + 'The name ID for entries in the "name" table that provide a display string for this axis', + ), + ( + "uint16", + "AxisOrdering", + None, + None, + "A value that applications can use to determine primary sorting of face names, or for ordering of descriptors when composing family or face names", + ), + ( + "uint8", + "MoreBytes", + "DesignAxisRecordSize", + -8, + "Extra bytes. 
Set to empty array.", + ), + ], + ), + ( + "AxisValueArray", + [ + ("Offset", "AxisValue", "AxisValueCount", 0, "Axis values"), + ], + ), + ( + "AxisValueFormat1", + [ + ("uint16", "Format", None, None, "Format, = 1"), + ( + "uint16", + "AxisIndex", + None, + None, + "Index into the axis record array identifying the axis of design variation to which the axis value record applies.", + ), + ("STATFlags", "Flags", None, None, "Flags."), + ("NameID", "ValueNameID", None, None, ""), + ("Fixed", "Value", None, None, ""), + ], + ), + ( + "AxisValueFormat2", + [ + ("uint16", "Format", None, None, "Format, = 2"), + ( + "uint16", + "AxisIndex", + None, + None, + "Index into the axis record array identifying the axis of design variation to which the axis value record applies.", + ), + ("STATFlags", "Flags", None, None, "Flags."), + ("NameID", "ValueNameID", None, None, ""), + ("Fixed", "NominalValue", None, None, ""), + ("Fixed", "RangeMinValue", None, None, ""), + ("Fixed", "RangeMaxValue", None, None, ""), + ], + ), + ( + "AxisValueFormat3", + [ + ("uint16", "Format", None, None, "Format, = 3"), + ( + "uint16", + "AxisIndex", + None, + None, + "Index into the axis record array identifying the axis of design variation to which the axis value record applies.", + ), + ("STATFlags", "Flags", None, None, "Flags."), + ("NameID", "ValueNameID", None, None, ""), + ("Fixed", "Value", None, None, ""), + ("Fixed", "LinkedValue", None, None, ""), + ], + ), + ( + "AxisValueFormat4", + [ + ("uint16", "Format", None, None, "Format, = 4"), + ( + "uint16", + "AxisCount", + None, + None, + "The total number of axes contributing to this axis-values combination.", + ), + ("STATFlags", "Flags", None, None, "Flags."), + ("NameID", "ValueNameID", None, None, ""), + ( + "struct", + "AxisValueRecord", + "AxisCount", + 0, + "Array of AxisValue records that provide the combination of axis values, one for each contributing axis. 
", + ), + ], + ), + ( + "AxisValueRecord", + [ + ( + "uint16", + "AxisIndex", + None, + None, + "Index into the axis record array identifying the axis of design variation to which the axis value record applies.", + ), + ("Fixed", "Value", None, None, "A numeric value for this attribute value."), + ], + ), + # + # Variation fonts + # + # GSUB/GPOS FeatureVariations + ( + "FeatureVariations", + [ + ( + "Version", + "Version", + None, + None, + "Version of the table-initially set to 0x00010000", + ), + ( + "uint32", + "FeatureVariationCount", + None, + None, + "Number of records in the FeatureVariationRecord array", + ), + ( + "struct", + "FeatureVariationRecord", + "FeatureVariationCount", + 0, + "Array of FeatureVariationRecord", + ), + ], + ), + ( + "FeatureVariationRecord", + [ + ( + "LOffset", + "ConditionSet", + None, + None, + "Offset to a ConditionSet table, from beginning of the FeatureVariations table.", + ), + ( + "LOffset", + "FeatureTableSubstitution", + None, + None, + "Offset to a FeatureTableSubstitution table, from beginning of the FeatureVariations table", + ), + ], + ), + ( + "ConditionList", + [ + ( + "uint32", + "ConditionCount", + None, + None, + "Number of condition tables in the ConditionTable array", + ), + ( + "LOffset", + "ConditionTable", + "ConditionCount", + 0, + "Array of offset to condition tables, from the beginning of the ConditionList table.", + ), + ], + ), + ( + "ConditionSet", + [ + ( + "uint16", + "ConditionCount", + None, + None, + "Number of condition tables in the ConditionTable array", + ), + ( + "LOffset", + "ConditionTable", + "ConditionCount", + 0, + "Array of offset to condition tables, from the beginning of the ConditionSet table.", + ), + ], + ), + ( + "ConditionTableFormat1", + [ + ("uint16", "Format", None, None, "Format, = 1"), + ( + "uint16", + "AxisIndex", + None, + None, + "Index for the variation axis within the fvar table, base 0.", + ), + ( + "F2Dot14", + "FilterRangeMinValue", + None, + None, + "Minimum 
normalized axis value of the font variation instances that satisfy this condition.", + ), + ( + "F2Dot14", + "FilterRangeMaxValue", + None, + None, + "Maximum value that satisfies this condition.", + ), + ], + ), + ( + "ConditionTableFormat2", + [ + ("uint16", "Format", None, None, "Format, = 2"), + ( + "int16", + "DefaultValue", + None, + None, + "Value at default instance.", + ), + ( + "uint32", + "VarIdx", + None, + None, + "Variation index to vary the value based on current designspace location.", + ), + ], + ), + ( + "ConditionTableFormat3", + [ + ("uint16", "Format", None, None, "Format, = 3"), + ( + "uint8", + "ConditionCount", + None, + None, + "Index for the variation axis within the fvar table, base 0.", + ), + ( + "Offset24", + "ConditionTable", + "ConditionCount", + 0, + "Array of condition tables for this conjunction (AND) expression.", + ), + ], + ), + ( + "ConditionTableFormat4", + [ + ("uint16", "Format", None, None, "Format, = 4"), + ( + "uint8", + "ConditionCount", + None, + None, + "Index for the variation axis within the fvar table, base 0.", + ), + ( + "Offset24", + "ConditionTable", + "ConditionCount", + 0, + "Array of condition tables for this disjunction (OR) expression.", + ), + ], + ), + ( + "ConditionTableFormat5", + [ + ("uint16", "Format", None, None, "Format, = 5"), + ( + "Offset24", + "ConditionTable", + None, + None, + "Condition to negate.", + ), + ], + ), + ( + "FeatureTableSubstitution", + [ + ( + "Version", + "Version", + None, + None, + "Version of the table-initially set to 0x00010000", + ), + ( + "uint16", + "SubstitutionCount", + None, + None, + "Number of records in the FeatureVariationRecords array", + ), + ( + "FeatureTableSubstitutionRecord", + "SubstitutionRecord", + "SubstitutionCount", + 0, + "Array of FeatureTableSubstitutionRecord", + ), + ], + ), + ( + "FeatureTableSubstitutionRecord", + [ + ("uint16", "FeatureIndex", None, None, "The feature table index to match."), + ( + "LOffset", + "Feature", + None, + None, + 
"Offset to an alternate feature table, from start of the FeatureTableSubstitution table.", + ), + ], + ), + # VariationStore + ( + "VarRegionAxis", + [ + ("F2Dot14", "StartCoord", None, None, ""), + ("F2Dot14", "PeakCoord", None, None, ""), + ("F2Dot14", "EndCoord", None, None, ""), + ], + ), + ( + "VarRegion", + [ + ("struct", "VarRegionAxis", "RegionAxisCount", 0, ""), + ], + ), + ( + "VarRegionList", + [ + ("uint16", "RegionAxisCount", None, None, ""), + ("uint16", "RegionCount", None, None, ""), + ("VarRegion", "Region", "RegionCount", 0, ""), + ], + ), + ( + "VarData", + [ + ("uint16", "ItemCount", None, None, ""), + ("uint16", "NumShorts", None, None, ""), + ("uint16", "VarRegionCount", None, None, ""), + ("uint16", "VarRegionIndex", "VarRegionCount", 0, ""), + ("VarDataValue", "Item", "ItemCount", 0, ""), + ], + ), + ( + "VarStore", + [ + ("uint16", "Format", None, None, "Set to 1."), + ("LOffset", "VarRegionList", None, None, ""), + ("uint16", "VarDataCount", None, None, ""), + ("LOffset", "VarData", "VarDataCount", 0, ""), + ], + ), + # Variation helpers + ( + "VarIdxMap", + [ + ("uint16", "EntryFormat", None, None, ""), # Automatically computed + ("uint16", "MappingCount", None, None, ""), # Automatically computed + ("VarIdxMapValue", "mapping", "", 0, "Array of compressed data"), + ], + ), + ( + "DeltaSetIndexMapFormat0", + [ + ("uint8", "Format", None, None, "Format of the DeltaSetIndexMap = 0"), + ("uint8", "EntryFormat", None, None, ""), # Automatically computed + ("uint16", "MappingCount", None, None, ""), # Automatically computed + ("VarIdxMapValue", "mapping", "", 0, "Array of compressed data"), + ], + ), + ( + "DeltaSetIndexMapFormat1", + [ + ("uint8", "Format", None, None, "Format of the DeltaSetIndexMap = 1"), + ("uint8", "EntryFormat", None, None, ""), # Automatically computed + ("uint32", "MappingCount", None, None, ""), # Automatically computed + ("VarIdxMapValue", "mapping", "", 0, "Array of compressed data"), + ], + ), + # 
MultiVariationStore + ( + "SparseVarRegionAxis", + [ + ("uint16", "AxisIndex", None, None, ""), + ("F2Dot14", "StartCoord", None, None, ""), + ("F2Dot14", "PeakCoord", None, None, ""), + ("F2Dot14", "EndCoord", None, None, ""), + ], + ), + ( + "SparseVarRegion", + [ + ("uint16", "SparseRegionCount", None, None, ""), + ("struct", "SparseVarRegionAxis", "SparseRegionCount", 0, ""), + ], + ), + ( + "SparseVarRegionList", + [ + ("uint16", "RegionCount", None, None, ""), + ("LOffsetTo(SparseVarRegion)", "Region", "RegionCount", 0, ""), + ], + ), + ( + "MultiVarData", + [ + ("uint8", "Format", None, None, "Set to 1."), + ("uint16", "VarRegionCount", None, None, ""), + ("uint16", "VarRegionIndex", "VarRegionCount", 0, ""), + ("TupleList", "Item", "", 0, ""), + ], + ), + ( + "MultiVarStore", + [ + ("uint16", "Format", None, None, "Set to 1."), + ("LOffset", "SparseVarRegionList", None, None, ""), + ("uint16", "MultiVarDataCount", None, None, ""), + ("LOffset", "MultiVarData", "MultiVarDataCount", 0, ""), + ], + ), + # VariableComposites + ( + "VARC", + [ + ( + "Version", + "Version", + None, + None, + "Version of the HVAR table-initially = 0x00010000", + ), + ("LOffset", "Coverage", None, None, ""), + ("LOffset", "MultiVarStore", None, None, "(may be NULL)"), + ("LOffset", "ConditionList", None, None, "(may be NULL)"), + ("LOffset", "AxisIndicesList", None, None, "(may be NULL)"), + ("LOffset", "VarCompositeGlyphs", None, None, ""), + ], + ), + ( + "AxisIndicesList", + [ + ("TupleList", "Item", "", 0, ""), + ], + ), + ( + "VarCompositeGlyphs", + [ + ("VarCompositeGlyphList", "VarCompositeGlyph", "", None, ""), + ], + ), + # Glyph advance variations + ( + "HVAR", + [ + ( + "Version", + "Version", + None, + None, + "Version of the HVAR table-initially = 0x00010000", + ), + ("LOffset", "VarStore", None, None, ""), + ("LOffsetTo(VarIdxMap)", "AdvWidthMap", None, None, ""), + ("LOffsetTo(VarIdxMap)", "LsbMap", None, None, ""), + ("LOffsetTo(VarIdxMap)", "RsbMap", None, None, 
""), + ], + ), + ( + "VVAR", + [ + ( + "Version", + "Version", + None, + None, + "Version of the VVAR table-initially = 0x00010000", + ), + ("LOffset", "VarStore", None, None, ""), + ("LOffsetTo(VarIdxMap)", "AdvHeightMap", None, None, ""), + ("LOffsetTo(VarIdxMap)", "TsbMap", None, None, ""), + ("LOffsetTo(VarIdxMap)", "BsbMap", None, None, ""), + ("LOffsetTo(VarIdxMap)", "VOrgMap", None, None, "Vertical origin mapping."), + ], + ), + # Font-wide metrics variations + ( + "MetricsValueRecord", + [ + ("Tag", "ValueTag", None, None, "4-byte font-wide measure identifier"), + ("uint32", "VarIdx", None, None, "Combined outer-inner variation index"), + ( + "uint8", + "MoreBytes", + "ValueRecordSize", + -8, + "Extra bytes. Set to empty array.", + ), + ], + ), + ( + "MVAR", + [ + ( + "Version", + "Version", + None, + None, + "Version of the MVAR table-initially = 0x00010000", + ), + ("uint16", "Reserved", None, None, "Set to 0"), + ("uint16", "ValueRecordSize", None, None, ""), + ("uint16", "ValueRecordCount", None, None, ""), + ("Offset", "VarStore", None, None, ""), + ("MetricsValueRecord", "ValueRecord", "ValueRecordCount", 0, ""), + ], + ), + # + # math + # + ( + "MATH", + [ + ( + "Version", + "Version", + None, + None, + "Version of the MATH table-initially set to 0x00010000.", + ), + ( + "Offset", + "MathConstants", + None, + None, + "Offset to MathConstants table - from the beginning of MATH table.", + ), + ( + "Offset", + "MathGlyphInfo", + None, + None, + "Offset to MathGlyphInfo table - from the beginning of MATH table.", + ), + ( + "Offset", + "MathVariants", + None, + None, + "Offset to MathVariants table - from the beginning of MATH table.", + ), + ], + ), + ( + "MathValueRecord", + [ + ("int16", "Value", None, None, "The X or Y value in design units."), + ( + "Offset", + "DeviceTable", + None, + None, + "Offset to the device table - from the beginning of parent table. May be NULL. 
Suggested format for device table is 1.", + ), + ], + ), + ( + "MathConstants", + [ + ( + "int16", + "ScriptPercentScaleDown", + None, + None, + "Percentage of scaling down for script level 1. Suggested value: 80%.", + ), + ( + "int16", + "ScriptScriptPercentScaleDown", + None, + None, + "Percentage of scaling down for script level 2 (ScriptScript). Suggested value: 60%.", + ), + ( + "uint16", + "DelimitedSubFormulaMinHeight", + None, + None, + "Minimum height required for a delimited expression to be treated as a subformula. Suggested value: normal line height x1.5.", + ), + ( + "uint16", + "DisplayOperatorMinHeight", + None, + None, + "Minimum height of n-ary operators (such as integral and summation) for formulas in display mode.", + ), + ( + "MathValueRecord", + "MathLeading", + None, + None, + "White space to be left between math formulas to ensure proper line spacing. For example, for applications that treat line gap as a part of line ascender, formulas with ink going above (os2.sTypoAscender + os2.sTypoLineGap - MathLeading) or with ink going below os2.sTypoDescender will result in increasing line height.", + ), + ("MathValueRecord", "AxisHeight", None, None, "Axis height of the font."), + ( + "MathValueRecord", + "AccentBaseHeight", + None, + None, + "Maximum (ink) height of accent base that does not require raising the accents. Suggested: x-height of the font (os2.sxHeight) plus any possible overshots.", + ), + ( + "MathValueRecord", + "FlattenedAccentBaseHeight", + None, + None, + "Maximum (ink) height of accent base that does not require flattening the accents. Suggested: cap height of the font (os2.sCapHeight).", + ), + ( + "MathValueRecord", + "SubscriptShiftDown", + None, + None, + "The standard shift down applied to subscript elements. Positive for moving in the downward direction. 
Suggested: os2.ySubscriptYOffset.", + ), + ( + "MathValueRecord", + "SubscriptTopMax", + None, + None, + "Maximum allowed height of the (ink) top of subscripts that does not require moving subscripts further down. Suggested: 4/5 x-height.", + ), + ( + "MathValueRecord", + "SubscriptBaselineDropMin", + None, + None, + "Minimum allowed drop of the baseline of subscripts relative to the (ink) bottom of the base. Checked for bases that are treated as a box or extended shape. Positive for subscript baseline dropped below the base bottom.", + ), + ( + "MathValueRecord", + "SuperscriptShiftUp", + None, + None, + "Standard shift up applied to superscript elements. Suggested: os2.ySuperscriptYOffset.", + ), + ( + "MathValueRecord", + "SuperscriptShiftUpCramped", + None, + None, + "Standard shift of superscripts relative to the base, in cramped style.", + ), + ( + "MathValueRecord", + "SuperscriptBottomMin", + None, + None, + "Minimum allowed height of the (ink) bottom of superscripts that does not require moving subscripts further up. Suggested: 1/4 x-height.", + ), + ( + "MathValueRecord", + "SuperscriptBaselineDropMax", + None, + None, + "Maximum allowed drop of the baseline of superscripts relative to the (ink) top of the base. Checked for bases that are treated as a box or extended shape. Positive for superscript baseline below the base top.", + ), + ( + "MathValueRecord", + "SubSuperscriptGapMin", + None, + None, + "Minimum gap between the superscript and subscript ink. Suggested: 4x default rule thickness.", + ), + ( + "MathValueRecord", + "SuperscriptBottomMaxWithSubscript", + None, + None, + "The maximum level to which the (ink) bottom of superscript can be pushed to increase the gap between superscript and subscript, before subscript starts being moved down. Suggested: 4/5 x-height.", + ), + ( + "MathValueRecord", + "SpaceAfterScript", + None, + None, + "Extra white space to be added after each subscript and superscript. 
Suggested: 0.5pt for a 12 pt font.", + ), + ( + "MathValueRecord", + "UpperLimitGapMin", + None, + None, + "Minimum gap between the (ink) bottom of the upper limit, and the (ink) top of the base operator.", + ), + ( + "MathValueRecord", + "UpperLimitBaselineRiseMin", + None, + None, + "Minimum distance between baseline of upper limit and (ink) top of the base operator.", + ), + ( + "MathValueRecord", + "LowerLimitGapMin", + None, + None, + "Minimum gap between (ink) top of the lower limit, and (ink) bottom of the base operator.", + ), + ( + "MathValueRecord", + "LowerLimitBaselineDropMin", + None, + None, + "Minimum distance between baseline of the lower limit and (ink) bottom of the base operator.", + ), + ( + "MathValueRecord", + "StackTopShiftUp", + None, + None, + "Standard shift up applied to the top element of a stack.", + ), + ( + "MathValueRecord", + "StackTopDisplayStyleShiftUp", + None, + None, + "Standard shift up applied to the top element of a stack in display style.", + ), + ( + "MathValueRecord", + "StackBottomShiftDown", + None, + None, + "Standard shift down applied to the bottom element of a stack. Positive for moving in the downward direction.", + ), + ( + "MathValueRecord", + "StackBottomDisplayStyleShiftDown", + None, + None, + "Standard shift down applied to the bottom element of a stack in display style. Positive for moving in the downward direction.", + ), + ( + "MathValueRecord", + "StackGapMin", + None, + None, + "Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element. Suggested: 3x default rule thickness.", + ), + ( + "MathValueRecord", + "StackDisplayStyleGapMin", + None, + None, + "Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element in display style. 
Suggested: 7x default rule thickness.", + ), + ( + "MathValueRecord", + "StretchStackTopShiftUp", + None, + None, + "Standard shift up applied to the top element of the stretch stack.", + ), + ( + "MathValueRecord", + "StretchStackBottomShiftDown", + None, + None, + "Standard shift down applied to the bottom element of the stretch stack. Positive for moving in the downward direction.", + ), + ( + "MathValueRecord", + "StretchStackGapAboveMin", + None, + None, + "Minimum gap between the ink of the stretched element, and the (ink) bottom of the element above. Suggested: UpperLimitGapMin", + ), + ( + "MathValueRecord", + "StretchStackGapBelowMin", + None, + None, + "Minimum gap between the ink of the stretched element, and the (ink) top of the element below. Suggested: LowerLimitGapMin.", + ), + ( + "MathValueRecord", + "FractionNumeratorShiftUp", + None, + None, + "Standard shift up applied to the numerator.", + ), + ( + "MathValueRecord", + "FractionNumeratorDisplayStyleShiftUp", + None, + None, + "Standard shift up applied to the numerator in display style. Suggested: StackTopDisplayStyleShiftUp.", + ), + ( + "MathValueRecord", + "FractionDenominatorShiftDown", + None, + None, + "Standard shift down applied to the denominator. Positive for moving in the downward direction.", + ), + ( + "MathValueRecord", + "FractionDenominatorDisplayStyleShiftDown", + None, + None, + "Standard shift down applied to the denominator in display style. Positive for moving in the downward direction. Suggested: StackBottomDisplayStyleShiftDown.", + ), + ( + "MathValueRecord", + "FractionNumeratorGapMin", + None, + None, + "Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar. Suggested: default rule thickness", + ), + ( + "MathValueRecord", + "FractionNumDisplayStyleGapMin", + None, + None, + "Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar in display style. 
Suggested: 3x default rule thickness.", + ), + ( + "MathValueRecord", + "FractionRuleThickness", + None, + None, + "Thickness of the fraction bar. Suggested: default rule thickness.", + ), + ( + "MathValueRecord", + "FractionDenominatorGapMin", + None, + None, + "Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar. Suggested: default rule thickness", + ), + ( + "MathValueRecord", + "FractionDenomDisplayStyleGapMin", + None, + None, + "Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.", + ), + ( + "MathValueRecord", + "SkewedFractionHorizontalGap", + None, + None, + "Horizontal distance between the top and bottom elements of a skewed fraction.", + ), + ( + "MathValueRecord", + "SkewedFractionVerticalGap", + None, + None, + "Vertical distance between the ink of the top and bottom elements of a skewed fraction.", + ), + ( + "MathValueRecord", + "OverbarVerticalGap", + None, + None, + "Distance between the overbar and the (ink) top of he base. Suggested: 3x default rule thickness.", + ), + ( + "MathValueRecord", + "OverbarRuleThickness", + None, + None, + "Thickness of overbar. Suggested: default rule thickness.", + ), + ( + "MathValueRecord", + "OverbarExtraAscender", + None, + None, + "Extra white space reserved above the overbar. Suggested: default rule thickness.", + ), + ( + "MathValueRecord", + "UnderbarVerticalGap", + None, + None, + "Distance between underbar and (ink) bottom of the base. Suggested: 3x default rule thickness.", + ), + ( + "MathValueRecord", + "UnderbarRuleThickness", + None, + None, + "Thickness of underbar. Suggested: default rule thickness.", + ), + ( + "MathValueRecord", + "UnderbarExtraDescender", + None, + None, + "Extra white space reserved below the underbar. Always positive. 
Suggested: default rule thickness.", + ), + ( + "MathValueRecord", + "RadicalVerticalGap", + None, + None, + "Space between the (ink) top of the expression and the bar over it. Suggested: 1 1/4 default rule thickness.", + ), + ( + "MathValueRecord", + "RadicalDisplayStyleVerticalGap", + None, + None, + "Space between the (ink) top of the expression and the bar over it. Suggested: default rule thickness + 1/4 x-height.", + ), + ( + "MathValueRecord", + "RadicalRuleThickness", + None, + None, + "Thickness of the radical rule. This is the thickness of the rule in designed or constructed radical signs. Suggested: default rule thickness.", + ), + ( + "MathValueRecord", + "RadicalExtraAscender", + None, + None, + "Extra white space reserved above the radical. Suggested: RadicalRuleThickness.", + ), + ( + "MathValueRecord", + "RadicalKernBeforeDegree", + None, + None, + "Extra horizontal kern before the degree of a radical, if such is present. Suggested: 5/18 of em.", + ), + ( + "MathValueRecord", + "RadicalKernAfterDegree", + None, + None, + "Negative kern after the degree of a radical, if such is present. Suggested: 10/18 of em.", + ), + ( + "uint16", + "RadicalDegreeBottomRaisePercent", + None, + None, + "Height of the bottom of the radical degree, if such is present, in proportion to the ascender of the radical sign. Suggested: 60%.", + ), + ], + ), + ( + "MathGlyphInfo", + [ + ( + "Offset", + "MathItalicsCorrectionInfo", + None, + None, + "Offset to MathItalicsCorrectionInfo table - from the beginning of MathGlyphInfo table.", + ), + ( + "Offset", + "MathTopAccentAttachment", + None, + None, + "Offset to MathTopAccentAttachment table - from the beginning of MathGlyphInfo table.", + ), + ( + "Offset", + "ExtendedShapeCoverage", + None, + None, + "Offset to coverage table for Extended Shape glyphs - from the beginning of MathGlyphInfo table. 
When the left or right glyph of a box is an extended shape variant, the (ink) box (and not the default position defined by values in MathConstants table) should be used for vertical positioning purposes. May be NULL.", + ), + ( + "Offset", + "MathKernInfo", + None, + None, + "Offset to MathKernInfo table - from the beginning of MathGlyphInfo table.", + ), + ], + ), + ( + "MathItalicsCorrectionInfo", + [ + ( + "Offset", + "Coverage", + None, + None, + "Offset to Coverage table - from the beginning of MathItalicsCorrectionInfo table.", + ), + ( + "uint16", + "ItalicsCorrectionCount", + None, + None, + "Number of italics correction values. Should coincide with the number of covered glyphs.", + ), + ( + "MathValueRecord", + "ItalicsCorrection", + "ItalicsCorrectionCount", + 0, + "Array of MathValueRecords defining italics correction values for each covered glyph.", + ), + ], + ), + ( + "MathTopAccentAttachment", + [ + ( + "Offset", + "TopAccentCoverage", + None, + None, + "Offset to Coverage table - from the beginning of MathTopAccentAttachment table.", + ), + ( + "uint16", + "TopAccentAttachmentCount", + None, + None, + "Number of top accent attachment point values. 
Should coincide with the number of covered glyphs", + ), + ( + "MathValueRecord", + "TopAccentAttachment", + "TopAccentAttachmentCount", + 0, + "Array of MathValueRecords defining top accent attachment points for each covered glyph", + ), + ], + ), + ( + "MathKernInfo", + [ + ( + "Offset", + "MathKernCoverage", + None, + None, + "Offset to Coverage table - from the beginning of the MathKernInfo table.", + ), + ("uint16", "MathKernCount", None, None, "Number of MathKernInfoRecords."), + ( + "MathKernInfoRecord", + "MathKernInfoRecords", + "MathKernCount", + 0, + "Array of MathKernInfoRecords, per-glyph information for mathematical positioning of subscripts and superscripts.", + ), + ], + ), + ( + "MathKernInfoRecord", + [ + ( + "Offset", + "TopRightMathKern", + None, + None, + "Offset to MathKern table for top right corner - from the beginning of MathKernInfo table. May be NULL.", + ), + ( + "Offset", + "TopLeftMathKern", + None, + None, + "Offset to MathKern table for the top left corner - from the beginning of MathKernInfo table. May be NULL.", + ), + ( + "Offset", + "BottomRightMathKern", + None, + None, + "Offset to MathKern table for bottom right corner - from the beginning of MathKernInfo table. May be NULL.", + ), + ( + "Offset", + "BottomLeftMathKern", + None, + None, + "Offset to MathKern table for bottom left corner - from the beginning of MathKernInfo table. May be NULL.", + ), + ], + ), + ( + "MathKern", + [ + ( + "uint16", + "HeightCount", + None, + None, + "Number of heights on which the kern value changes.", + ), + ( + "MathValueRecord", + "CorrectionHeight", + "HeightCount", + 0, + "Array of correction heights at which the kern value changes. Sorted by the height value in design units.", + ), + ( + "MathValueRecord", + "KernValue", + "HeightCount", + 1, + "Array of kern values corresponding to heights. 
First value is the kern value for all heights less or equal than the first height in this table.Last value is the value to be applied for all heights greater than the last height in this table. Negative values are interpreted as move glyphs closer to each other.", + ), + ], + ), + ( + "MathVariants", + [ + ( + "uint16", + "MinConnectorOverlap", + None, + None, + "Minimum overlap of connecting glyphs during glyph construction, in design units.", + ), + ( + "Offset", + "VertGlyphCoverage", + None, + None, + "Offset to Coverage table - from the beginning of MathVariants table.", + ), + ( + "Offset", + "HorizGlyphCoverage", + None, + None, + "Offset to Coverage table - from the beginning of MathVariants table.", + ), + ( + "uint16", + "VertGlyphCount", + None, + None, + "Number of glyphs for which information is provided for vertically growing variants.", + ), + ( + "uint16", + "HorizGlyphCount", + None, + None, + "Number of glyphs for which information is provided for horizontally growing variants.", + ), + ( + "Offset", + "VertGlyphConstruction", + "VertGlyphCount", + 0, + "Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in vertical direction.", + ), + ( + "Offset", + "HorizGlyphConstruction", + "HorizGlyphCount", + 0, + "Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in horizontal direction.", + ), + ], + ), + ( + "MathGlyphConstruction", + [ + ( + "Offset", + "GlyphAssembly", + None, + None, + "Offset to GlyphAssembly table for this shape - from the beginning of MathGlyphConstruction table. 
May be NULL", + ), + ( + "uint16", + "VariantCount", + None, + None, + "Count of glyph growing variants for this glyph.", + ), + ( + "MathGlyphVariantRecord", + "MathGlyphVariantRecord", + "VariantCount", + 0, + "MathGlyphVariantRecords for alternative variants of the glyphs.", + ), + ], + ), + ( + "MathGlyphVariantRecord", + [ + ("GlyphID", "VariantGlyph", None, None, "Glyph ID for the variant."), + ( + "uint16", + "AdvanceMeasurement", + None, + None, + "Advance width/height, in design units, of the variant, in the direction of requested glyph extension.", + ), + ], + ), + ( + "GlyphAssembly", + [ + ( + "MathValueRecord", + "ItalicsCorrection", + None, + None, + "Italics correction of this GlyphAssembly. Should not depend on the assembly size.", + ), + ("uint16", "PartCount", None, None, "Number of parts in this assembly."), + ( + "GlyphPartRecord", + "PartRecords", + "PartCount", + 0, + "Array of part records, from left to right and bottom to top.", + ), + ], + ), + ( + "GlyphPartRecord", + [ + ("GlyphID", "glyph", None, None, "Glyph ID for the part."), + ( + "uint16", + "StartConnectorLength", + None, + None, + "Advance width/ height of the straight bar connector material, in design units, is at the beginning of the glyph, in the direction of the extension.", + ), + ( + "uint16", + "EndConnectorLength", + None, + None, + "Advance width/ height of the straight bar connector material, in design units, is at the end of the glyph, in the direction of the extension.", + ), + ( + "uint16", + "FullAdvance", + None, + None, + "Full advance width/height for this part, in the direction of the extension. In design units.", + ), + ( + "uint16", + "PartFlags", + None, + None, + "Part qualifiers. PartFlags enumeration currently uses only one bit: 0x0001 fExtender: If set, the part can be skipped or repeated. 
0xFFFE Reserved", + ), + ], + ), + ## + ## Apple Advanced Typography (AAT) tables + ## + ( + "AATLookupSegment", + [ + ("uint16", "lastGlyph", None, None, "Last glyph index in this segment."), + ("uint16", "firstGlyph", None, None, "First glyph index in this segment."), + ( + "uint16", + "value", + None, + None, + "A 16-bit offset from the start of the table to the data.", + ), + ], + ), + # + # ankr + # + ( + "ankr", + [ + ("struct", "AnchorPoints", None, None, "Anchor points table."), + ], + ), + ( + "AnchorPointsFormat0", + [ + ("uint16", "Format", None, None, "Format of the anchor points table, = 0."), + ("uint16", "Flags", None, None, "Flags. Currenty unused, set to zero."), + ( + "AATLookupWithDataOffset(AnchorGlyphData)", + "Anchors", + None, + None, + "Table of with anchor overrides for each glyph.", + ), + ], + ), + ( + "AnchorGlyphData", + [ + ( + "uint32", + "AnchorPointCount", + None, + None, + "Number of anchor points for this glyph.", + ), + ( + "struct", + "AnchorPoint", + "AnchorPointCount", + 0, + "Individual anchor points.", + ), + ], + ), + ( + "AnchorPoint", + [ + ("int16", "XCoordinate", None, None, "X coordinate of this anchor point."), + ("int16", "YCoordinate", None, None, "Y coordinate of this anchor point."), + ], + ), + # + # bsln + # + ( + "bsln", + [ + ( + "Version", + "Version", + None, + None, + "Version number of the AAT baseline table (0x00010000 for the initial version).", + ), + ("struct", "Baseline", None, None, "Baseline table."), + ], + ), + ( + "BaselineFormat0", + [ + ("uint16", "Format", None, None, "Format of the baseline table, = 0."), + ( + "uint16", + "DefaultBaseline", + None, + None, + "Default baseline value for all glyphs. This value can be from 0 through 31.", + ), + ( + "uint16", + "Delta", + 32, + 0, + "These are the FUnit distance deltas from the font’s natural baseline to the other baselines used in the font. 
A total of 32 deltas must be assigned.", + ), + ], + ), + ( + "BaselineFormat1", + [ + ("uint16", "Format", None, None, "Format of the baseline table, = 1."), + ( + "uint16", + "DefaultBaseline", + None, + None, + "Default baseline value for all glyphs. This value can be from 0 through 31.", + ), + ( + "uint16", + "Delta", + 32, + 0, + "These are the FUnit distance deltas from the font’s natural baseline to the other baselines used in the font. A total of 32 deltas must be assigned.", + ), + ( + "AATLookup(uint16)", + "BaselineValues", + None, + None, + "Lookup table that maps glyphs to their baseline values.", + ), + ], + ), + ( + "BaselineFormat2", + [ + ("uint16", "Format", None, None, "Format of the baseline table, = 1."), + ( + "uint16", + "DefaultBaseline", + None, + None, + "Default baseline value for all glyphs. This value can be from 0 through 31.", + ), + ( + "GlyphID", + "StandardGlyph", + None, + None, + "Glyph index of the glyph in this font to be used to set the baseline values. This glyph must contain a set of control points (whose numbers are contained in the following field) that determines baseline distances.", + ), + ( + "uint16", + "ControlPoint", + 32, + 0, + "Array of 32 control point numbers, associated with the standard glyph. A value of 0xFFFF means there is no corresponding control point in the standard glyph.", + ), + ], + ), + ( + "BaselineFormat3", + [ + ("uint16", "Format", None, None, "Format of the baseline table, = 1."), + ( + "uint16", + "DefaultBaseline", + None, + None, + "Default baseline value for all glyphs. This value can be from 0 through 31.", + ), + ( + "GlyphID", + "StandardGlyph", + None, + None, + "Glyph index of the glyph in this font to be used to set the baseline values. 
This glyph must contain a set of control points (whose numbers are contained in the following field) that determines baseline distances.", + ), + ( + "uint16", + "ControlPoint", + 32, + 0, + "Array of 32 control point numbers, associated with the standard glyph. A value of 0xFFFF means there is no corresponding control point in the standard glyph.", + ), + ( + "AATLookup(uint16)", + "BaselineValues", + None, + None, + "Lookup table that maps glyphs to their baseline values.", + ), + ], + ), + # + # cidg + # + ( + "cidg", + [ + ("struct", "CIDGlyphMapping", None, None, "CID-to-glyph mapping table."), + ], + ), + ( + "CIDGlyphMappingFormat0", + [ + ( + "uint16", + "Format", + None, + None, + "Format of the CID-to-glyph mapping table, = 0.", + ), + ("uint16", "DataFormat", None, None, "Currenty unused, set to zero."), + ("uint32", "StructLength", None, None, "Size of the table in bytes."), + ("uint16", "Registry", None, None, "The registry ID."), + ( + "char64", + "RegistryName", + None, + None, + "The registry name in ASCII; unused bytes should be set to 0.", + ), + ("uint16", "Order", None, None, "The order ID."), + ( + "char64", + "OrderName", + None, + None, + "The order name in ASCII; unused bytes should be set to 0.", + ), + ("uint16", "SupplementVersion", None, None, "The supplement version."), + ( + "CIDGlyphMap", + "Mapping", + None, + None, + "A mapping from CIDs to the glyphs in the font, starting with CID 0. 
If a CID from the identified collection has no glyph in the font, 0xFFFF is used", + ), + ], + ), + # + # feat + # + ( + "feat", + [ + ( + "Version", + "Version", + None, + None, + "Version of the feat table-initially set to 0x00010000.", + ), + ("FeatureNames", "FeatureNames", None, None, "The feature names."), + ], + ), + ( + "FeatureNames", + [ + ( + "uint16", + "FeatureNameCount", + None, + None, + "Number of entries in the feature name array.", + ), + ("uint16", "Reserved1", None, None, "Reserved (set to zero)."), + ("uint32", "Reserved2", None, None, "Reserved (set to zero)."), + ( + "FeatureName", + "FeatureName", + "FeatureNameCount", + 0, + "The feature name array.", + ), + ], + ), + ( + "FeatureName", + [ + ("uint16", "FeatureType", None, None, "Feature type."), + ( + "uint16", + "SettingsCount", + None, + None, + "The number of records in the setting name array.", + ), + ( + "LOffset", + "Settings", + None, + None, + "Offset to setting table for this feature.", + ), + ( + "uint16", + "FeatureFlags", + None, + None, + "Single-bit flags associated with the feature type.", + ), + ( + "NameID", + "FeatureNameID", + None, + None, + "The name table index for the feature name.", + ), + ], + ), + ( + "Settings", + [ + ("Setting", "Setting", "SettingsCount", 0, "The setting array."), + ], + ), + ( + "Setting", + [ + ("uint16", "SettingValue", None, None, "The setting."), + ( + "NameID", + "SettingNameID", + None, + None, + "The name table index for the setting name.", + ), + ], + ), + # + # gcid + # + ( + "gcid", + [ + ("struct", "GlyphCIDMapping", None, None, "Glyph to CID mapping table."), + ], + ), + ( + "GlyphCIDMappingFormat0", + [ + ( + "uint16", + "Format", + None, + None, + "Format of the glyph-to-CID mapping table, = 0.", + ), + ("uint16", "DataFormat", None, None, "Currenty unused, set to zero."), + ("uint32", "StructLength", None, None, "Size of the table in bytes."), + ("uint16", "Registry", None, None, "The registry ID."), + ( + "char64", + 
"RegistryName", + None, + None, + "The registry name in ASCII; unused bytes should be set to 0.", + ), + ("uint16", "Order", None, None, "The order ID."), + ( + "char64", + "OrderName", + None, + None, + "The order name in ASCII; unused bytes should be set to 0.", + ), + ("uint16", "SupplementVersion", None, None, "The supplement version."), + ( + "GlyphCIDMap", + "Mapping", + None, + None, + "The CIDs for the glyphs in the font, starting with glyph 0. If a glyph does not correspond to a CID in the identified collection, 0xFFFF is used", + ), + ], + ), + # + # lcar + # + ( + "lcar", + [ + ( + "Version", + "Version", + None, + None, + "Version number of the ligature caret table (0x00010000 for the initial version).", + ), + ("struct", "LigatureCarets", None, None, "Ligature carets table."), + ], + ), + ( + "LigatureCaretsFormat0", + [ + ( + "uint16", + "Format", + None, + None, + "Format of the ligature caret table. Format 0 indicates division points are distances in font units, Format 1 indicates division points are indexes of control points.", + ), + ( + "AATLookup(LigCaretDistances)", + "Carets", + None, + None, + "Lookup table associating ligature glyphs with their caret positions, in font unit distances.", + ), + ], + ), + ( + "LigatureCaretsFormat1", + [ + ( + "uint16", + "Format", + None, + None, + "Format of the ligature caret table. 
Format 0 indicates division points are distances in font units, Format 1 indicates division points are indexes of control points.", + ), + ( + "AATLookup(LigCaretPoints)", + "Carets", + None, + None, + "Lookup table associating ligature glyphs with their caret positions, as control points.", + ), + ], + ), + ( + "LigCaretDistances", + [ + ("uint16", "DivsionPointCount", None, None, "Number of division points."), + ( + "int16", + "DivisionPoint", + "DivsionPointCount", + 0, + "Distance in font units through which a subdivision is made orthogonally to the baseline.", + ), + ], + ), + ( + "LigCaretPoints", + [ + ("uint16", "DivsionPointCount", None, None, "Number of division points."), + ( + "int16", + "DivisionPoint", + "DivsionPointCount", + 0, + "The number of the control point through which a subdivision is made orthogonally to the baseline.", + ), + ], + ), + # + # mort + # + ( + "mort", + [ + ("Version", "Version", None, None, "Version of the mort table."), + ( + "uint32", + "MorphChainCount", + None, + None, + "Number of metamorphosis chains.", + ), + ( + "MortChain", + "MorphChain", + "MorphChainCount", + 0, + "Array of metamorphosis chains.", + ), + ], + ), + ( + "MortChain", + [ + ( + "Flags32", + "DefaultFlags", + None, + None, + "The default specification for subtables.", + ), + ( + "uint32", + "StructLength", + None, + None, + "Total byte count, including this header; must be a multiple of 4.", + ), + ( + "uint16", + "MorphFeatureCount", + None, + None, + "Number of metamorphosis feature entries.", + ), + ( + "uint16", + "MorphSubtableCount", + None, + None, + "The number of subtables in the chain.", + ), + ( + "struct", + "MorphFeature", + "MorphFeatureCount", + 0, + "Array of metamorphosis features.", + ), + ( + "MortSubtable", + "MorphSubtable", + "MorphSubtableCount", + 0, + "Array of metamorphosis subtables.", + ), + ], + ), + ( + "MortSubtable", + [ + ( + "uint16", + "StructLength", + None, + None, + "Total subtable length, including this header.", 
+ ), + ( + "uint8", + "CoverageFlags", + None, + None, + "Most significant byte of coverage flags.", + ), + ("uint8", "MorphType", None, None, "Subtable type."), + ( + "Flags32", + "SubFeatureFlags", + None, + None, + "The 32-bit mask identifying which subtable this is (the subtable being executed if the AND of this value and the processed defaultFlags is nonzero).", + ), + ("SubStruct", "SubStruct", None, None, "SubTable."), + ], + ), + # + # morx + # + ( + "morx", + [ + ("uint16", "Version", None, None, "Version of the morx table."), + ("uint16", "Reserved", None, None, "Reserved (set to zero)."), + ( + "uint32", + "MorphChainCount", + None, + None, + "Number of extended metamorphosis chains.", + ), + ( + "MorxChain", + "MorphChain", + "MorphChainCount", + 0, + "Array of extended metamorphosis chains.", + ), + ], + ), + ( + "MorxChain", + [ + ( + "Flags32", + "DefaultFlags", + None, + None, + "The default specification for subtables.", + ), + ( + "uint32", + "StructLength", + None, + None, + "Total byte count, including this header; must be a multiple of 4.", + ), + ( + "uint32", + "MorphFeatureCount", + None, + None, + "Number of feature subtable entries.", + ), + ( + "uint32", + "MorphSubtableCount", + None, + None, + "The number of subtables in the chain.", + ), + ( + "MorphFeature", + "MorphFeature", + "MorphFeatureCount", + 0, + "Array of metamorphosis features.", + ), + ( + "MorxSubtable", + "MorphSubtable", + "MorphSubtableCount", + 0, + "Array of extended metamorphosis subtables.", + ), + ], + ), + ( + "MorphFeature", + [ + ("uint16", "FeatureType", None, None, "The type of feature."), + ( + "uint16", + "FeatureSetting", + None, + None, + "The feature's setting (aka selector).", + ), + ( + "Flags32", + "EnableFlags", + None, + None, + "Flags for the settings that this feature and setting enables.", + ), + ( + "Flags32", + "DisableFlags", + None, + None, + "Complement of flags for the settings that this feature and setting disable.", + ), + ], + ), + # 
Apple TrueType Reference Manual, chapter “The ‘morx’ table”, + # section “Metamorphosis Subtables”. + # https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html + ( + "MorxSubtable", + [ + ( + "uint32", + "StructLength", + None, + None, + "Total subtable length, including this header.", + ), + ( + "uint8", + "CoverageFlags", + None, + None, + "Most significant byte of coverage flags.", + ), + ("uint16", "Reserved", None, None, "Unused."), + ("uint8", "MorphType", None, None, "Subtable type."), + ( + "Flags32", + "SubFeatureFlags", + None, + None, + "The 32-bit mask identifying which subtable this is (the subtable being executed if the AND of this value and the processed defaultFlags is nonzero).", + ), + ("SubStruct", "SubStruct", None, None, "SubTable."), + ], + ), + ( + "StateHeader", + [ + ( + "uint32", + "ClassCount", + None, + None, + "Number of classes, which is the number of 16-bit entry indices in a single line in the state array.", + ), + ( + "uint32", + "MorphClass", + None, + None, + "Offset from the start of this state table header to the start of the class table.", + ), + ( + "uint32", + "StateArrayOffset", + None, + None, + "Offset from the start of this state table header to the start of the state array.", + ), + ( + "uint32", + "EntryTableOffset", + None, + None, + "Offset from the start of this state table header to the start of the entry table.", + ), + ], + ), + ( + "RearrangementMorph", + [ + ( + "STXHeader(RearrangementMorphAction)", + "StateTable", + None, + None, + "Finite-state transducer table for indic rearrangement.", + ), + ], + ), + ( + "ContextualMorph", + [ + ( + "STXHeader(ContextualMorphAction)", + "StateTable", + None, + None, + "Finite-state transducer for contextual glyph substitution.", + ), + ], + ), + ( + "LigatureMorph", + [ + ( + "STXHeader(LigatureMorphAction)", + "StateTable", + None, + None, + "Finite-state transducer for ligature substitution.", + ), + ], + ), + ( + "NoncontextualMorph", + [ + ( + 
"AATLookup(GlyphID)", + "Substitution", + None, + None, + "The noncontextual glyph substitution table.", + ), + ], + ), + ( + "InsertionMorph", + [ + ( + "STXHeader(InsertionMorphAction)", + "StateTable", + None, + None, + "Finite-state transducer for glyph insertion.", + ), + ], + ), + ( + "MorphClass", + [ + ( + "uint16", + "FirstGlyph", + None, + None, + "Glyph index of the first glyph in the class table.", + ), + # ('uint16', 'GlyphCount', None, None, 'Number of glyphs in class table.'), + # ('uint8', 'GlyphClass', 'GlyphCount', 0, 'The class codes (indexed by glyph index minus firstGlyph). Class codes range from 0 to the value of stateSize minus 1.'), + ], + ), + # If the 'morx' table version is 3 or greater, then the last subtable in the chain is followed by a subtableGlyphCoverageArray, as described below. + # ('Offset', 'MarkGlyphSetsDef', None, 'round(Version*0x10000) >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'), + # + # prop + # + ( + "prop", + [ + ( + "Fixed", + "Version", + None, + None, + "Version number of the AAT glyphs property table. Version 1.0 is the initial table version. Version 2.0, which is recognized by macOS 8.5 and later, adds support for the “attaches on right” bit. Version 3.0, which gets recognized by macOS X and iOS, adds support for the additional directional properties defined in Unicode 3.0.", + ), + ("struct", "GlyphProperties", None, None, "Glyph properties."), + ], + ), + ( + "GlyphPropertiesFormat0", + [ + ("uint16", "Format", None, None, "Format, = 0."), + ( + "uint16", + "DefaultProperties", + None, + None, + "Default properties applied to a glyph. 
Since there is no lookup table in prop format 0, the default properties get applied to every glyph in the font.", + ), + ], + ), + ( + "GlyphPropertiesFormat1", + [ + ("uint16", "Format", None, None, "Format, = 1."), + ( + "uint16", + "DefaultProperties", + None, + None, + "Default properties applied to a glyph if that glyph is not present in the Properties lookup table.", + ), + ( + "AATLookup(uint16)", + "Properties", + None, + None, + "Lookup data associating glyphs with their properties.", + ), + ], + ), + # + # opbd + # + ( + "opbd", + [ + ( + "Version", + "Version", + None, + None, + "Version number of the optical bounds table (0x00010000 for the initial version).", + ), + ("struct", "OpticalBounds", None, None, "Optical bounds table."), + ], + ), + ( + "OpticalBoundsFormat0", + [ + ( + "uint16", + "Format", + None, + None, + "Format of the optical bounds table, = 0.", + ), + ( + "AATLookup(OpticalBoundsDeltas)", + "OpticalBoundsDeltas", + None, + None, + "Lookup table associating glyphs with their optical bounds, given as deltas in font units.", + ), + ], + ), + ( + "OpticalBoundsFormat1", + [ + ( + "uint16", + "Format", + None, + None, + "Format of the optical bounds table, = 1.", + ), + ( + "AATLookup(OpticalBoundsPoints)", + "OpticalBoundsPoints", + None, + None, + "Lookup table associating glyphs with their optical bounds, given as references to control points.", + ), + ], + ), + ( + "OpticalBoundsDeltas", + [ + ( + "int16", + "Left", + None, + None, + "Delta value for the left-side optical edge.", + ), + ("int16", "Top", None, None, "Delta value for the top-side optical edge."), + ( + "int16", + "Right", + None, + None, + "Delta value for the right-side optical edge.", + ), + ( + "int16", + "Bottom", + None, + None, + "Delta value for the bottom-side optical edge.", + ), + ], + ), + ( + "OpticalBoundsPoints", + [ + ( + "int16", + "Left", + None, + None, + "Control point index for the left-side optical edge, or -1 if this glyph has none.", + ), + ( + 
"int16", + "Top", + None, + None, + "Control point index for the top-side optical edge, or -1 if this glyph has none.", + ), + ( + "int16", + "Right", + None, + None, + "Control point index for the right-side optical edge, or -1 if this glyph has none.", + ), + ( + "int16", + "Bottom", + None, + None, + "Control point index for the bottom-side optical edge, or -1 if this glyph has none.", + ), + ], + ), + # + # TSIC + # + ( + "TSIC", + [ + ( + "Version", + "Version", + None, + None, + "Version of table initially set to 0x00010000.", + ), + ("uint16", "Flags", None, None, "TSIC flags - set to 0"), + ("uint16", "AxisCount", None, None, "Axis count from fvar"), + ("uint16", "RecordCount", None, None, "TSIC record count"), + ("uint16", "Reserved", None, None, "Set to 0"), + ("Tag", "AxisArray", "AxisCount", 0, "Array of axis tags in fvar order"), + ( + "LocationRecord", + "RecordLocations", + "RecordCount", + 0, + "Location in variation space of TSIC record", + ), + ("TSICRecord", "Record", "RecordCount", 0, "Array of TSIC records"), + ], + ), + ( + "LocationRecord", + [ + ("F2Dot14", "Axis", "AxisCount", 0, "Axis record"), + ], + ), + ( + "TSICRecord", + [ + ("uint16", "Flags", None, None, "Record flags - set to 0"), + ("uint16", "NumCVTEntries", None, None, "Number of CVT number value pairs"), + ("uint16", "NameLength", None, None, "Length of optional user record name"), + ("uint16", "NameArray", "NameLength", 0, "Unicode 16 name"), + ("uint16", "CVTArray", "NumCVTEntries", 0, "CVT number array"), + ("int16", "CVTValueArray", "NumCVTEntries", 0, "CVT value"), + ], + ), + # + # COLR + # + ( + "COLR", + [ + ("uint16", "Version", None, None, "Table version number (starts at 0)."), + ( + "uint16", + "BaseGlyphRecordCount", + None, + None, + "Number of Base Glyph Records.", + ), + ( + "LOffset", + "BaseGlyphRecordArray", + None, + None, + "Offset (from beginning of COLR table) to Base Glyph records.", + ), + ( + "LOffset", + "LayerRecordArray", + None, + None, + "Offset 
(from beginning of COLR table) to Layer Records.", + ), + ("uint16", "LayerRecordCount", None, None, "Number of Layer Records."), + ( + "LOffset", + "BaseGlyphList", + None, + "Version >= 1", + "Offset (from beginning of COLR table) to array of Version-1 Base Glyph records.", + ), + ( + "LOffset", + "LayerList", + None, + "Version >= 1", + "Offset (from beginning of COLR table) to LayerList.", + ), + ( + "LOffset", + "ClipList", + None, + "Version >= 1", + "Offset to ClipList table (may be NULL)", + ), + ( + "LOffsetTo(DeltaSetIndexMap)", + "VarIndexMap", + None, + "Version >= 1", + "Offset to DeltaSetIndexMap table (may be NULL)", + ), + ( + "LOffset", + "VarStore", + None, + "Version >= 1", + "Offset to variation store (may be NULL)", + ), + ], + ), + ( + "BaseGlyphRecordArray", + [ + ( + "BaseGlyphRecord", + "BaseGlyphRecord", + "BaseGlyphRecordCount", + 0, + "Base Glyph records.", + ), + ], + ), + ( + "BaseGlyphRecord", + [ + ( + "GlyphID", + "BaseGlyph", + None, + None, + "Glyph ID of reference glyph. This glyph is for reference only and is not rendered for color.", + ), + ( + "uint16", + "FirstLayerIndex", + None, + None, + "Index (from beginning of the Layer Records) to the layer record. 
There will be numLayers consecutive entries for this base glyph.", + ), + ( + "uint16", + "NumLayers", + None, + None, + "Number of color layers associated with this glyph.", + ), + ], + ), + ( + "LayerRecordArray", + [ + ("LayerRecord", "LayerRecord", "LayerRecordCount", 0, "Layer records."), + ], + ), + ( + "LayerRecord", + [ + ( + "GlyphID", + "LayerGlyph", + None, + None, + "Glyph ID of layer glyph (must be in z-order from bottom to top).", + ), + ( + "uint16", + "PaletteIndex", + None, + None, + "Index value to use with a selected color palette.", + ), + ], + ), + ( + "BaseGlyphList", + [ + ( + "uint32", + "BaseGlyphCount", + None, + None, + "Number of Version-1 Base Glyph records", + ), + ( + "struct", + "BaseGlyphPaintRecord", + "BaseGlyphCount", + 0, + "Array of Version-1 Base Glyph records", + ), + ], + ), + ( + "BaseGlyphPaintRecord", + [ + ("GlyphID", "BaseGlyph", None, None, "Glyph ID of reference glyph."), + ( + "LOffset", + "Paint", + None, + None, + "Offset (from beginning of BaseGlyphPaintRecord) to Paint, typically a PaintColrLayers.", + ), + ], + ), + ( + "LayerList", + [ + ("uint32", "LayerCount", None, None, "Number of Version-1 Layers"), + ( + "LOffset", + "Paint", + "LayerCount", + 0, + "Array of offsets to Paint tables, from the start of the LayerList table.", + ), + ], + ), + ( + "ClipListFormat1", + [ + ( + "uint8", + "Format", + None, + None, + "Format for ClipList with 16bit glyph IDs: 1", + ), + ("uint32", "ClipCount", None, None, "Number of Clip records."), + ( + "struct", + "ClipRecord", + "ClipCount", + 0, + "Array of Clip records sorted by glyph ID.", + ), + ], + ), + ( + "ClipRecord", + [ + ("uint16", "StartGlyphID", None, None, "First glyph ID in the range."), + ("uint16", "EndGlyphID", None, None, "Last glyph ID in the range."), + ("Offset24", "ClipBox", None, None, "Offset to a ClipBox table."), + ], + ), + ( + "ClipBoxFormat1", + [ + ( + "uint8", + "Format", + None, + None, + "Format for ClipBox without variation: set to 1.", + 
), + ("int16", "xMin", None, None, "Minimum x of clip box."), + ("int16", "yMin", None, None, "Minimum y of clip box."), + ("int16", "xMax", None, None, "Maximum x of clip box."), + ("int16", "yMax", None, None, "Maximum y of clip box."), + ], + ), + ( + "ClipBoxFormat2", + [ + ("uint8", "Format", None, None, "Format for variable ClipBox: set to 2."), + ("int16", "xMin", None, None, "Minimum x of clip box. VarIndexBase + 0."), + ("int16", "yMin", None, None, "Minimum y of clip box. VarIndexBase + 1."), + ("int16", "xMax", None, None, "Maximum x of clip box. VarIndexBase + 2."), + ("int16", "yMax", None, None, "Maximum y of clip box. VarIndexBase + 3."), + ( + "VarIndex", + "VarIndexBase", + None, + None, + "Base index into DeltaSetIndexMap.", + ), + ], + ), + # COLRv1 Affine2x3 uses the same column-major order to serialize a 2D + # Affine Transformation as the one used by fontTools.misc.transform. + # However, for historical reasons, the labels 'xy' and 'yx' are swapped. + # Their fundamental meaning is the same though. + # COLRv1 Affine2x3 follows the names found in FreeType and Cairo. + # In all case, the second element in the 6-tuple correspond to the + # y-part of the x basis vector, and the third to the x-part of the y + # basis vector. + # See https://github.com/googlefonts/colr-gradients-spec/pull/85 + ( + "Affine2x3", + [ + ("Fixed", "xx", None, None, "x-part of x basis vector"), + ("Fixed", "yx", None, None, "y-part of x basis vector"), + ("Fixed", "xy", None, None, "x-part of y basis vector"), + ("Fixed", "yy", None, None, "y-part of y basis vector"), + ("Fixed", "dx", None, None, "Translation in x direction"), + ("Fixed", "dy", None, None, "Translation in y direction"), + ], + ), + ( + "VarAffine2x3", + [ + ("Fixed", "xx", None, None, "x-part of x basis vector. VarIndexBase + 0."), + ("Fixed", "yx", None, None, "y-part of x basis vector. VarIndexBase + 1."), + ("Fixed", "xy", None, None, "x-part of y basis vector. 
VarIndexBase + 2."), + ("Fixed", "yy", None, None, "y-part of y basis vector. VarIndexBase + 3."), + ( + "Fixed", + "dx", + None, + None, + "Translation in x direction. VarIndexBase + 4.", + ), + ( + "Fixed", + "dy", + None, + None, + "Translation in y direction. VarIndexBase + 5.", + ), + ( + "VarIndex", + "VarIndexBase", + None, + None, + "Base index into DeltaSetIndexMap.", + ), + ], + ), + ( + "ColorStop", + [ + ("F2Dot14", "StopOffset", None, None, ""), + ("uint16", "PaletteIndex", None, None, "Index for a CPAL palette entry."), + ("F2Dot14", "Alpha", None, None, "Values outsided [0.,1.] reserved"), + ], + ), + ( + "VarColorStop", + [ + ("F2Dot14", "StopOffset", None, None, "VarIndexBase + 0."), + ("uint16", "PaletteIndex", None, None, "Index for a CPAL palette entry."), + ( + "F2Dot14", + "Alpha", + None, + None, + "Values outsided [0.,1.] reserved. VarIndexBase + 1.", + ), + ( + "VarIndex", + "VarIndexBase", + None, + None, + "Base index into DeltaSetIndexMap.", + ), + ], + ), + ( + "ColorLine", + [ + ( + "ExtendMode", + "Extend", + None, + None, + "Enum {PAD = 0, REPEAT = 1, REFLECT = 2}", + ), + ("uint16", "StopCount", None, None, "Number of Color stops."), + ("ColorStop", "ColorStop", "StopCount", 0, "Array of Color stops."), + ], + ), + ( + "VarColorLine", + [ + ( + "ExtendMode", + "Extend", + None, + None, + "Enum {PAD = 0, REPEAT = 1, REFLECT = 2}", + ), + ("uint16", "StopCount", None, None, "Number of Color stops."), + ("VarColorStop", "ColorStop", "StopCount", 0, "Array of Color stops."), + ], + ), + # PaintColrLayers + ( + "PaintFormat1", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 1"), + ( + "uint8", + "NumLayers", + None, + None, + "Number of offsets to Paint to read from LayerList.", + ), + ("uint32", "FirstLayerIndex", None, None, "Index into LayerList."), + ], + ), + # PaintSolid + ( + "PaintFormat2", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 2"), + ("uint16", "PaletteIndex", None, None, 
"Index for a CPAL palette entry."), + ("F2Dot14", "Alpha", None, None, "Values outsided [0.,1.] reserved"), + ], + ), + # PaintVarSolid + ( + "PaintFormat3", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 3"), + ("uint16", "PaletteIndex", None, None, "Index for a CPAL palette entry."), + ( + "F2Dot14", + "Alpha", + None, + None, + "Values outsided [0.,1.] reserved. VarIndexBase + 0.", + ), + ( + "VarIndex", + "VarIndexBase", + None, + None, + "Base index into DeltaSetIndexMap.", + ), + ], + ), + # PaintLinearGradient + ( + "PaintFormat4", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 4"), + ( + "Offset24", + "ColorLine", + None, + None, + "Offset (from beginning of PaintLinearGradient table) to ColorLine subtable.", + ), + ("int16", "x0", None, None, ""), + ("int16", "y0", None, None, ""), + ("int16", "x1", None, None, ""), + ("int16", "y1", None, None, ""), + ("int16", "x2", None, None, ""), + ("int16", "y2", None, None, ""), + ], + ), + # PaintVarLinearGradient + ( + "PaintFormat5", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 5"), + ( + "LOffset24To(VarColorLine)", + "ColorLine", + None, + None, + "Offset (from beginning of PaintVarLinearGradient table) to VarColorLine subtable.", + ), + ("int16", "x0", None, None, "VarIndexBase + 0."), + ("int16", "y0", None, None, "VarIndexBase + 1."), + ("int16", "x1", None, None, "VarIndexBase + 2."), + ("int16", "y1", None, None, "VarIndexBase + 3."), + ("int16", "x2", None, None, "VarIndexBase + 4."), + ("int16", "y2", None, None, "VarIndexBase + 5."), + ( + "VarIndex", + "VarIndexBase", + None, + None, + "Base index into DeltaSetIndexMap.", + ), + ], + ), + # PaintRadialGradient + ( + "PaintFormat6", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 6"), + ( + "Offset24", + "ColorLine", + None, + None, + "Offset (from beginning of PaintRadialGradient table) to ColorLine subtable.", + ), + ("int16", "x0", None, None, ""), + 
("int16", "y0", None, None, ""), + ("uint16", "r0", None, None, ""), + ("int16", "x1", None, None, ""), + ("int16", "y1", None, None, ""), + ("uint16", "r1", None, None, ""), + ], + ), + # PaintVarRadialGradient + ( + "PaintFormat7", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 7"), + ( + "LOffset24To(VarColorLine)", + "ColorLine", + None, + None, + "Offset (from beginning of PaintVarRadialGradient table) to VarColorLine subtable.", + ), + ("int16", "x0", None, None, "VarIndexBase + 0."), + ("int16", "y0", None, None, "VarIndexBase + 1."), + ("uint16", "r0", None, None, "VarIndexBase + 2."), + ("int16", "x1", None, None, "VarIndexBase + 3."), + ("int16", "y1", None, None, "VarIndexBase + 4."), + ("uint16", "r1", None, None, "VarIndexBase + 5."), + ( + "VarIndex", + "VarIndexBase", + None, + None, + "Base index into DeltaSetIndexMap.", + ), + ], + ), + # PaintSweepGradient + ( + "PaintFormat8", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 8"), + ( + "Offset24", + "ColorLine", + None, + None, + "Offset (from beginning of PaintSweepGradient table) to ColorLine subtable.", + ), + ("int16", "centerX", None, None, "Center x coordinate."), + ("int16", "centerY", None, None, "Center y coordinate."), + ( + "BiasedAngle", + "startAngle", + None, + None, + "Start of the angular range of the gradient.", + ), + ( + "BiasedAngle", + "endAngle", + None, + None, + "End of the angular range of the gradient.", + ), + ], + ), + # PaintVarSweepGradient + ( + "PaintFormat9", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 9"), + ( + "LOffset24To(VarColorLine)", + "ColorLine", + None, + None, + "Offset (from beginning of PaintVarSweepGradient table) to VarColorLine subtable.", + ), + ("int16", "centerX", None, None, "Center x coordinate. VarIndexBase + 0."), + ("int16", "centerY", None, None, "Center y coordinate. 
VarIndexBase + 1."), + ( + "BiasedAngle", + "startAngle", + None, + None, + "Start of the angular range of the gradient. VarIndexBase + 2.", + ), + ( + "BiasedAngle", + "endAngle", + None, + None, + "End of the angular range of the gradient. VarIndexBase + 3.", + ), + ( + "VarIndex", + "VarIndexBase", + None, + None, + "Base index into DeltaSetIndexMap.", + ), + ], + ), + # PaintGlyph + ( + "PaintFormat10", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 10"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintGlyph table) to Paint subtable.", + ), + ("GlyphID", "Glyph", None, None, "Glyph ID for the source outline."), + ], + ), + # PaintColrGlyph + ( + "PaintFormat11", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 11"), + ( + "GlyphID", + "Glyph", + None, + None, + "Virtual glyph ID for a BaseGlyphList base glyph.", + ), + ], + ), + # PaintTransform + ( + "PaintFormat12", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 12"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintTransform table) to Paint subtable.", + ), + ( + "LOffset24To(Affine2x3)", + "Transform", + None, + None, + "2x3 matrix for 2D affine transformations.", + ), + ], + ), + # PaintVarTransform + ( + "PaintFormat13", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 13"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintVarTransform table) to Paint subtable.", + ), + ( + "LOffset24To(VarAffine2x3)", + "Transform", + None, + None, + "2x3 matrix for 2D affine transformations.", + ), + ], + ), + # PaintTranslate + ( + "PaintFormat14", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 14"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintTranslate table) to Paint subtable.", + ), + ("int16", "dx", None, None, "Translation in x direction."), + ("int16", "dy", None, None, 
"Translation in y direction."), + ], + ), + # PaintVarTranslate + ( + "PaintFormat15", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 15"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintVarTranslate table) to Paint subtable.", + ), + ( + "int16", + "dx", + None, + None, + "Translation in x direction. VarIndexBase + 0.", + ), + ( + "int16", + "dy", + None, + None, + "Translation in y direction. VarIndexBase + 1.", + ), + ( + "VarIndex", + "VarIndexBase", + None, + None, + "Base index into DeltaSetIndexMap.", + ), + ], + ), + # PaintScale + ( + "PaintFormat16", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 16"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintScale table) to Paint subtable.", + ), + ("F2Dot14", "scaleX", None, None, ""), + ("F2Dot14", "scaleY", None, None, ""), + ], + ), + # PaintVarScale + ( + "PaintFormat17", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 17"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintVarScale table) to Paint subtable.", + ), + ("F2Dot14", "scaleX", None, None, "VarIndexBase + 0."), + ("F2Dot14", "scaleY", None, None, "VarIndexBase + 1."), + ( + "VarIndex", + "VarIndexBase", + None, + None, + "Base index into DeltaSetIndexMap.", + ), + ], + ), + # PaintScaleAroundCenter + ( + "PaintFormat18", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 18"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintScaleAroundCenter table) to Paint subtable.", + ), + ("F2Dot14", "scaleX", None, None, ""), + ("F2Dot14", "scaleY", None, None, ""), + ("int16", "centerX", None, None, ""), + ("int16", "centerY", None, None, ""), + ], + ), + # PaintVarScaleAroundCenter + ( + "PaintFormat19", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 19"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from 
beginning of PaintVarScaleAroundCenter table) to Paint subtable.", + ), + ("F2Dot14", "scaleX", None, None, "VarIndexBase + 0."), + ("F2Dot14", "scaleY", None, None, "VarIndexBase + 1."), + ("int16", "centerX", None, None, "VarIndexBase + 2."), + ("int16", "centerY", None, None, "VarIndexBase + 3."), + ( + "VarIndex", + "VarIndexBase", + None, + None, + "Base index into DeltaSetIndexMap.", + ), + ], + ), + # PaintScaleUniform + ( + "PaintFormat20", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 20"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintScaleUniform table) to Paint subtable.", + ), + ("F2Dot14", "scale", None, None, ""), + ], + ), + # PaintVarScaleUniform + ( + "PaintFormat21", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 21"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintVarScaleUniform table) to Paint subtable.", + ), + ("F2Dot14", "scale", None, None, "VarIndexBase + 0."), + ( + "VarIndex", + "VarIndexBase", + None, + None, + "Base index into DeltaSetIndexMap.", + ), + ], + ), + # PaintScaleUniformAroundCenter + ( + "PaintFormat22", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 22"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintScaleUniformAroundCenter table) to Paint subtable.", + ), + ("F2Dot14", "scale", None, None, ""), + ("int16", "centerX", None, None, ""), + ("int16", "centerY", None, None, ""), + ], + ), + # PaintVarScaleUniformAroundCenter + ( + "PaintFormat23", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 23"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintVarScaleUniformAroundCenter table) to Paint subtable.", + ), + ("F2Dot14", "scale", None, None, "VarIndexBase + 0"), + ("int16", "centerX", None, None, "VarIndexBase + 1"), + ("int16", "centerY", None, None, "VarIndexBase + 2"), + ( + "VarIndex", + 
"VarIndexBase", + None, + None, + "Base index into DeltaSetIndexMap.", + ), + ], + ), + # PaintRotate + ( + "PaintFormat24", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 24"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintRotate table) to Paint subtable.", + ), + ("Angle", "angle", None, None, ""), + ], + ), + # PaintVarRotate + ( + "PaintFormat25", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 25"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintVarRotate table) to Paint subtable.", + ), + ("Angle", "angle", None, None, "VarIndexBase + 0."), + ( + "VarIndex", + "VarIndexBase", + None, + None, + "Base index into DeltaSetIndexMap.", + ), + ], + ), + # PaintRotateAroundCenter + ( + "PaintFormat26", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 26"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintRotateAroundCenter table) to Paint subtable.", + ), + ("Angle", "angle", None, None, ""), + ("int16", "centerX", None, None, ""), + ("int16", "centerY", None, None, ""), + ], + ), + # PaintVarRotateAroundCenter + ( + "PaintFormat27", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 27"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintVarRotateAroundCenter table) to Paint subtable.", + ), + ("Angle", "angle", None, None, "VarIndexBase + 0."), + ("int16", "centerX", None, None, "VarIndexBase + 1."), + ("int16", "centerY", None, None, "VarIndexBase + 2."), + ( + "VarIndex", + "VarIndexBase", + None, + None, + "Base index into DeltaSetIndexMap.", + ), + ], + ), + # PaintSkew + ( + "PaintFormat28", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 28"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintSkew table) to Paint subtable.", + ), + ("Angle", "xSkewAngle", None, None, ""), + ("Angle", "ySkewAngle", 
None, None, ""), + ], + ), + # PaintVarSkew + ( + "PaintFormat29", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 29"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintVarSkew table) to Paint subtable.", + ), + ("Angle", "xSkewAngle", None, None, "VarIndexBase + 0."), + ("Angle", "ySkewAngle", None, None, "VarIndexBase + 1."), + ( + "VarIndex", + "VarIndexBase", + None, + None, + "Base index into DeltaSetIndexMap.", + ), + ], + ), + # PaintSkewAroundCenter + ( + "PaintFormat30", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 30"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintSkewAroundCenter table) to Paint subtable.", + ), + ("Angle", "xSkewAngle", None, None, ""), + ("Angle", "ySkewAngle", None, None, ""), + ("int16", "centerX", None, None, ""), + ("int16", "centerY", None, None, ""), + ], + ), + # PaintVarSkewAroundCenter + ( + "PaintFormat31", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 31"), + ( + "Offset24", + "Paint", + None, + None, + "Offset (from beginning of PaintVarSkewAroundCenter table) to Paint subtable.", + ), + ("Angle", "xSkewAngle", None, None, "VarIndexBase + 0."), + ("Angle", "ySkewAngle", None, None, "VarIndexBase + 1."), + ("int16", "centerX", None, None, "VarIndexBase + 2."), + ("int16", "centerY", None, None, "VarIndexBase + 3."), + ( + "VarIndex", + "VarIndexBase", + None, + None, + "Base index into DeltaSetIndexMap.", + ), + ], + ), + # PaintComposite + ( + "PaintFormat32", + [ + ("uint8", "PaintFormat", None, None, "Format identifier-format = 32"), + ( + "LOffset24To(Paint)", + "SourcePaint", + None, + None, + "Offset (from beginning of PaintComposite table) to source Paint subtable.", + ), + ( + "CompositeMode", + "CompositeMode", + None, + None, + "A CompositeMode enumeration value.", + ), + ( + "LOffset24To(Paint)", + "BackdropPaint", + None, + None, + "Offset (from beginning of PaintComposite 
table) to backdrop Paint subtable.", + ), + ], + ), + # + # avar + # + ( + "AxisValueMap", + [ + ( + "F2Dot14", + "FromCoordinate", + None, + None, + "A normalized coordinate value obtained using default normalization", + ), + ( + "F2Dot14", + "ToCoordinate", + None, + None, + "The modified, normalized coordinate value", + ), + ], + ), + ( + "AxisSegmentMap", + [ + ( + "uint16", + "PositionMapCount", + None, + None, + "The number of correspondence pairs for this axis", + ), + ( + "AxisValueMap", + "AxisValueMap", + "PositionMapCount", + 0, + "The array of axis value map records for this axis", + ), + ], + ), + ( + "avar", + [ + ( + "Version", + "Version", + None, + None, + "Version of the avar table- 0x00010000 or 0x00020000", + ), + ("uint16", "Reserved", None, None, "Permanently reserved; set to zero"), + ( + "uint16", + "AxisCount", + None, + None, + 'The number of variation axes for this font. This must be the same number as axisCount in the "fvar" table', + ), + ( + "AxisSegmentMap", + "AxisSegmentMap", + "AxisCount", + 0, + 'The segment maps array — one segment map for each axis, in the order of axes specified in the "fvar" table', + ), + ( + "LOffsetTo(DeltaSetIndexMap)", + "VarIdxMap", + None, + "Version >= 0x00020000", + "", + ), + ("LOffset", "VarStore", None, "Version >= 0x00020000", ""), + ], + ), +] diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/otTraverse.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/otTraverse.py new file mode 100644 index 0000000000000000000000000000000000000000..ac9421872353fe388acbedcd63bfc9a04ddc519f --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/otTraverse.py @@ -0,0 +1,162 @@ +"""Methods for traversing trees of otData-driven OpenType tables.""" + +from collections import deque +from typing import Callable, Deque, Iterable, List, Optional, Tuple +from .otBase import BaseTable + + +__all__ = [ + "bfs_base_table", + "dfs_base_table", + 
"SubTablePath", +] + + +class SubTablePath(Tuple[BaseTable.SubTableEntry, ...]): + def __str__(self) -> str: + path_parts = [] + for entry in self: + path_part = entry.name + if entry.index is not None: + path_part += f"[{entry.index}]" + path_parts.append(path_part) + return ".".join(path_parts) + + +# Given f(current frontier, new entries) add new entries to frontier +AddToFrontierFn = Callable[[Deque[SubTablePath], List[SubTablePath]], None] + + +def dfs_base_table( + root: BaseTable, + root_accessor: Optional[str] = None, + skip_root: bool = False, + predicate: Optional[Callable[[SubTablePath], bool]] = None, + iter_subtables_fn: Optional[ + Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]] + ] = None, +) -> Iterable[SubTablePath]: + """Depth-first search tree of BaseTables. + + Args: + root (BaseTable): the root of the tree. + root_accessor (Optional[str]): attribute name for the root table, if any (mostly + useful for debugging). + skip_root (Optional[bool]): if True, the root itself is not visited, only its + children. + predicate (Optional[Callable[[SubTablePath], bool]]): function to filter out + paths. If True, the path is yielded and its subtables are added to the + queue. If False, the path is skipped and its subtables are not traversed. + iter_subtables_fn (Optional[Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]]): + function to iterate over subtables of a table. If None, the default + BaseTable.iterSubTables() is used. + + Yields: + SubTablePath: tuples of BaseTable.SubTableEntry(name, table, index) namedtuples + for each of the nodes in the tree. The last entry in a path is the current + subtable, whereas preceding ones refer to its parent tables all the way up to + the root. 
+ """ + yield from _traverse_ot_data( + root, + root_accessor, + skip_root, + predicate, + lambda frontier, new: frontier.extendleft(reversed(new)), + iter_subtables_fn, + ) + + +def bfs_base_table( + root: BaseTable, + root_accessor: Optional[str] = None, + skip_root: bool = False, + predicate: Optional[Callable[[SubTablePath], bool]] = None, + iter_subtables_fn: Optional[ + Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]] + ] = None, +) -> Iterable[SubTablePath]: + """Breadth-first search tree of BaseTables. + + Args: + the root of the tree. + root_accessor (Optional[str]): attribute name for the root table, if any (mostly + useful for debugging). + skip_root (Optional[bool]): if True, the root itself is not visited, only its + children. + predicate (Optional[Callable[[SubTablePath], bool]]): function to filter out + paths. If True, the path is yielded and its subtables are added to the + queue. If False, the path is skipped and its subtables are not traversed. + iter_subtables_fn (Optional[Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]]]): + function to iterate over subtables of a table. If None, the default + BaseTable.iterSubTables() is used. + + Yields: + SubTablePath: tuples of BaseTable.SubTableEntry(name, table, index) namedtuples + for each of the nodes in the tree. The last entry in a path is the current + subtable, whereas preceding ones refer to its parent tables all the way up to + the root. 
+ """ + yield from _traverse_ot_data( + root, + root_accessor, + skip_root, + predicate, + lambda frontier, new: frontier.extend(new), + iter_subtables_fn, + ) + + +def _traverse_ot_data( + root: BaseTable, + root_accessor: Optional[str], + skip_root: bool, + predicate: Optional[Callable[[SubTablePath], bool]], + add_to_frontier_fn: AddToFrontierFn, + iter_subtables_fn: Optional[ + Callable[[BaseTable], Iterable[BaseTable.SubTableEntry]] + ] = None, +) -> Iterable[SubTablePath]: + # no visited because general otData cannot cycle (forward-offset only) + if root_accessor is None: + root_accessor = type(root).__name__ + + if predicate is None: + + def predicate(path): + return True + + if iter_subtables_fn is None: + + def iter_subtables_fn(table): + return table.iterSubTables() + + frontier: Deque[SubTablePath] = deque() + + root_entry = BaseTable.SubTableEntry(root_accessor, root) + if not skip_root: + frontier.append((root_entry,)) + else: + add_to_frontier_fn( + frontier, + [ + (root_entry, subtable_entry) + for subtable_entry in iter_subtables_fn(root) + ], + ) + + while frontier: + # path is (value, attr_name) tuples. 
attr_name is attr of parent to get value + path = frontier.popleft() + current = path[-1].value + + if not predicate(path): + continue + + yield SubTablePath(path) + + new_entries = [ + path + (subtable_entry,) for subtable_entry in iter_subtables_fn(current) + ] + + add_to_frontier_fn(frontier, new_entries) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/sbixGlyph.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/sbixGlyph.py new file mode 100644 index 0000000000000000000000000000000000000000..b744a2a3bc88907ef027ad7670b1b04f164dd44a --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/sbixGlyph.py @@ -0,0 +1,149 @@ +from fontTools.misc import sstruct +from fontTools.misc.textTools import readHex, safeEval +import struct + + +sbixGlyphHeaderFormat = """ + > + originOffsetX: h # The x-value of the point in the glyph relative to its + # lower-left corner which corresponds to the origin of + # the glyph on the screen, that is the point on the + # baseline at the left edge of the glyph. + originOffsetY: h # The y-value of the point in the glyph relative to its + # lower-left corner which corresponds to the origin of + # the glyph on the screen, that is the point on the + # baseline at the left edge of the glyph. + graphicType: 4s # e.g. 
"png " +""" + +sbixGlyphHeaderFormatSize = sstruct.calcsize(sbixGlyphHeaderFormat) + + +class Glyph(object): + def __init__( + self, + glyphName=None, + referenceGlyphName=None, + originOffsetX=0, + originOffsetY=0, + graphicType=None, + imageData=None, + rawdata=None, + gid=0, + ): + self.gid = gid + self.glyphName = glyphName + self.referenceGlyphName = referenceGlyphName + self.originOffsetX = originOffsetX + self.originOffsetY = originOffsetY + self.rawdata = rawdata + self.graphicType = graphicType + self.imageData = imageData + + # fix self.graphicType if it is null terminated or too short + if self.graphicType is not None: + if self.graphicType[-1] == "\0": + self.graphicType = self.graphicType[:-1] + if len(self.graphicType) > 4: + from fontTools import ttLib + + raise ttLib.TTLibError( + "Glyph.graphicType must not be longer than 4 characters." + ) + elif len(self.graphicType) < 4: + # pad with spaces + self.graphicType += " "[: (4 - len(self.graphicType))] + + def is_reference_type(self): + """Returns True if this glyph is a reference to another glyph's image data.""" + return self.graphicType == "dupe" or self.graphicType == "flip" + + def decompile(self, ttFont): + self.glyphName = ttFont.getGlyphName(self.gid) + if self.rawdata is None: + from fontTools import ttLib + + raise ttLib.TTLibError("No table data to decompile") + if len(self.rawdata) > 0: + if len(self.rawdata) < sbixGlyphHeaderFormatSize: + from fontTools import ttLib + + # print "Glyph %i header too short: Expected %x, got %x." 
% (self.gid, sbixGlyphHeaderFormatSize, len(self.rawdata)) + raise ttLib.TTLibError("Glyph header too short.") + + sstruct.unpack( + sbixGlyphHeaderFormat, self.rawdata[:sbixGlyphHeaderFormatSize], self + ) + + if self.is_reference_type(): + # this glyph is a reference to another glyph's image data + (gid,) = struct.unpack(">H", self.rawdata[sbixGlyphHeaderFormatSize:]) + self.referenceGlyphName = ttFont.getGlyphName(gid) + else: + self.imageData = self.rawdata[sbixGlyphHeaderFormatSize:] + self.referenceGlyphName = None + # clean up + del self.rawdata + del self.gid + + def compile(self, ttFont): + if self.glyphName is None: + from fontTools import ttLib + + raise ttLib.TTLibError("Can't compile Glyph without glyph name") + # TODO: if ttFont has no maxp, cmap etc., ignore glyph names and compile by index? + # (needed if you just want to compile the sbix table on its own) + self.gid = struct.pack(">H", ttFont.getGlyphID(self.glyphName)) + if self.graphicType is None: + rawdata = b"" + else: + rawdata = sstruct.pack(sbixGlyphHeaderFormat, self) + if self.is_reference_type(): + rawdata += struct.pack(">H", ttFont.getGlyphID(self.referenceGlyphName)) + else: + assert self.imageData is not None + rawdata += self.imageData + self.rawdata = rawdata + + def toXML(self, xmlWriter, ttFont): + if self.graphicType is None: + # TODO: ignore empty glyphs? + # a glyph data entry is required for each glyph, + # but empty ones can be calculated at compile time + xmlWriter.simpletag("glyph", name=self.glyphName) + xmlWriter.newline() + return + xmlWriter.begintag( + "glyph", + graphicType=self.graphicType, + name=self.glyphName, + originOffsetX=self.originOffsetX, + originOffsetY=self.originOffsetY, + ) + xmlWriter.newline() + if self.is_reference_type(): + # this glyph is a reference to another glyph id. 
+ xmlWriter.simpletag("ref", glyphname=self.referenceGlyphName) + else: + xmlWriter.begintag("hexdata") + xmlWriter.newline() + xmlWriter.dumphex(self.imageData) + xmlWriter.endtag("hexdata") + xmlWriter.newline() + xmlWriter.endtag("glyph") + xmlWriter.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "ref": + # this glyph i.e. a reference to another glyph's image data. + # in this case imageData contains the glyph id of the reference glyph + # get glyph id from glyphname + glyphname = safeEval("'''" + attrs["glyphname"] + "'''") + self.imageData = struct.pack(">H", ttFont.getGlyphID(glyphname)) + self.referenceGlyphName = glyphname + elif name == "hexdata": + self.imageData = readHex(content) + else: + from fontTools import ttLib + + raise ttLib.TTLibError("can't handle '%s' element" % name) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/sbixStrike.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/sbixStrike.py new file mode 100644 index 0000000000000000000000000000000000000000..7614af4c7b325c363c0b30edfc85a478aa15f01b --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/sbixStrike.py @@ -0,0 +1,177 @@ +from fontTools.misc import sstruct +from fontTools.misc.textTools import safeEval +from .sbixGlyph import Glyph +import struct + +sbixStrikeHeaderFormat = """ + > + ppem: H # The PPEM for which this strike was designed (e.g., 9, + # 12, 24) + resolution: H # The screen resolution (in dpi) for which this strike + # was designed (e.g., 72) +""" + +sbixGlyphDataOffsetFormat = """ + > + glyphDataOffset: L # Offset from the beginning of the strike data record + # to data for the individual glyph +""" + +sbixStrikeHeaderFormatSize = sstruct.calcsize(sbixStrikeHeaderFormat) +sbixGlyphDataOffsetFormatSize = sstruct.calcsize(sbixGlyphDataOffsetFormat) + + +class Strike(object): + def __init__(self, rawdata=None, ppem=0, resolution=72): + self.data = rawdata + 
self.ppem = ppem + self.resolution = resolution + self.glyphs = {} + + def decompile(self, ttFont): + if self.data is None: + from fontTools import ttLib + + raise ttLib.TTLibError + if len(self.data) < sbixStrikeHeaderFormatSize: + from fontTools import ttLib + + raise ( + ttLib.TTLibError, + "Strike header too short: Expected %x, got %x.", + ) % (sbixStrikeHeaderFormatSize, len(self.data)) + + # read Strike header from raw data + sstruct.unpack( + sbixStrikeHeaderFormat, self.data[:sbixStrikeHeaderFormatSize], self + ) + + # calculate number of glyphs + (firstGlyphDataOffset,) = struct.unpack( + ">L", + self.data[ + sbixStrikeHeaderFormatSize : sbixStrikeHeaderFormatSize + + sbixGlyphDataOffsetFormatSize + ], + ) + self.numGlyphs = ( + firstGlyphDataOffset - sbixStrikeHeaderFormatSize + ) // sbixGlyphDataOffsetFormatSize - 1 + # ^ -1 because there's one more offset than glyphs + + # build offset list for single glyph data offsets + self.glyphDataOffsets = [] + for i in range( + self.numGlyphs + 1 + ): # + 1 because there's one more offset than glyphs + start = i * sbixGlyphDataOffsetFormatSize + sbixStrikeHeaderFormatSize + (current_offset,) = struct.unpack( + ">L", self.data[start : start + sbixGlyphDataOffsetFormatSize] + ) + self.glyphDataOffsets.append(current_offset) + + # iterate through offset list and slice raw data into glyph data records + for i in range(self.numGlyphs): + current_glyph = Glyph( + rawdata=self.data[ + self.glyphDataOffsets[i] : self.glyphDataOffsets[i + 1] + ], + gid=i, + ) + current_glyph.decompile(ttFont) + self.glyphs[current_glyph.glyphName] = current_glyph + del self.glyphDataOffsets + del self.numGlyphs + del self.data + + def compile(self, ttFont): + self.glyphDataOffsets = b"" + self.bitmapData = b"" + + glyphOrder = ttFont.getGlyphOrder() + + # first glyph starts right after the header + currentGlyphDataOffset = ( + sbixStrikeHeaderFormatSize + + sbixGlyphDataOffsetFormatSize * (len(glyphOrder) + 1) + ) + for glyphName in 
glyphOrder: + if glyphName in self.glyphs: + # we have glyph data for this glyph + current_glyph = self.glyphs[glyphName] + else: + # must add empty glyph data record for this glyph + current_glyph = Glyph(glyphName=glyphName) + current_glyph.compile(ttFont) + current_glyph.glyphDataOffset = currentGlyphDataOffset + self.bitmapData += current_glyph.rawdata + currentGlyphDataOffset += len(current_glyph.rawdata) + self.glyphDataOffsets += sstruct.pack( + sbixGlyphDataOffsetFormat, current_glyph + ) + + # add last "offset", really the end address of the last glyph data record + dummy = Glyph() + dummy.glyphDataOffset = currentGlyphDataOffset + self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, dummy) + + # pack header + self.data = sstruct.pack(sbixStrikeHeaderFormat, self) + # add offsets and image data after header + self.data += self.glyphDataOffsets + self.bitmapData + + def toXML(self, xmlWriter, ttFont): + xmlWriter.begintag("strike") + xmlWriter.newline() + xmlWriter.simpletag("ppem", value=self.ppem) + xmlWriter.newline() + xmlWriter.simpletag("resolution", value=self.resolution) + xmlWriter.newline() + glyphOrder = ttFont.getGlyphOrder() + for i in range(len(glyphOrder)): + if glyphOrder[i] in self.glyphs: + self.glyphs[glyphOrder[i]].toXML(xmlWriter, ttFont) + # TODO: what if there are more glyph data records than (glyf table) glyphs? 
+ xmlWriter.endtag("strike") + xmlWriter.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name in ["ppem", "resolution"]: + setattr(self, name, safeEval(attrs["value"])) + elif name == "glyph": + if "graphicType" in attrs: + myFormat = safeEval("'''" + attrs["graphicType"] + "'''") + else: + myFormat = None + if "glyphname" in attrs: + myGlyphName = safeEval("'''" + attrs["glyphname"] + "'''") + elif "name" in attrs: + myGlyphName = safeEval("'''" + attrs["name"] + "'''") + else: + from fontTools import ttLib + + raise ttLib.TTLibError("Glyph must have a glyph name.") + if "originOffsetX" in attrs: + myOffsetX = safeEval(attrs["originOffsetX"]) + else: + myOffsetX = 0 + if "originOffsetY" in attrs: + myOffsetY = safeEval(attrs["originOffsetY"]) + else: + myOffsetY = 0 + current_glyph = Glyph( + glyphName=myGlyphName, + graphicType=myFormat, + originOffsetX=myOffsetX, + originOffsetY=myOffsetY, + ) + for element in content: + if isinstance(element, tuple): + name, attrs, content = element + current_glyph.fromXML(name, attrs, content, ttFont) + current_glyph.compile(ttFont) + self.glyphs[current_glyph.glyphName] = current_glyph + else: + from fontTools import ttLib + + raise ttLib.TTLibError("can't handle '%s' element" % name) diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Download.BLM_J5wv.js.gz b/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Download.BLM_J5wv.js.gz new file mode 100644 index 0000000000000000000000000000000000000000..15ed6f7fe33d946856563b6222fa3404a7b0944d --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Download.BLM_J5wv.js.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e1a3e1b9b4e22c96c671012d3cbe599322f1d13d8b2c64b13dcfb47094c9dfe +size 663 diff --git 
a/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Index.CaFoxV_Y.js.gz b/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Index.CaFoxV_Y.js.gz new file mode 100644 index 0000000000000000000000000000000000000000..e9ecc1444c8786372abb7c9c2dfe248cf461222d --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Index.CaFoxV_Y.js.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d48c41e0d19a90c874c5a197fe0609949256eeeb9477d99bc453f6f4a9a8d3e +size 8927 diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Index.VQu3lFWN.js.gz b/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Index.VQu3lFWN.js.gz new file mode 100644 index 0000000000000000000000000000000000000000..06e31d47d1dbb59e24b71340cbe0176c9624a0e6 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Index.VQu3lFWN.js.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1b6dddb8c6aef8b72e60755e425843ec2822d9cd01027c6996b75e5e8400d22 +size 1504 diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/LineChart.D-OPS8mj.js.gz b/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/LineChart.D-OPS8mj.js.gz new file mode 100644 index 0000000000000000000000000000000000000000..56b2d4c77b180a73169ac819cf2168183a16ebed --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/LineChart.D-OPS8mj.js.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fefa2046b2b8e39deb5167b21104aef7f675a4bbf34698e91f35ba478196856 +size 749 diff --git 
a/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/index.CJcgre1d.js.gz b/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/index.CJcgre1d.js.gz new file mode 100644 index 0000000000000000000000000000000000000000..36dbbd730a09542bc2dcda36376a9996db12d65e --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/index.CJcgre1d.js.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c669d706b970203bfd35c9c33cbd04c31776cafefa0e13e07ff3fb13113ad3ef +size 19849 diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/index.COuHOEdr.js.gz b/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/index.COuHOEdr.js.gz new file mode 100644 index 0000000000000000000000000000000000000000..1cf865efcac007dad99ae324786ef274301dfb79 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/index.COuHOEdr.js.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a859ab7c197340686d1304816ef0c27a3cfb76b07bdd2de0fee4fe8a338c8bb3 +size 9096 diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/index.DTU9giFV.js.gz b/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/index.DTU9giFV.js.gz new file mode 100644 index 0000000000000000000000000000000000000000..a396406fa269a29e83e5378c7f6696fc03061bc0 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/index.DTU9giFV.js.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90861941c8d10315b5a6baaf3971dadf7a7fe704738eb32a4580dbd80b83f9da +size 403