diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/afmLib.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/afmLib.py new file mode 100644 index 0000000000000000000000000000000000000000..0aabf7f6356df7209ba15b4242cacc89cc558993 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/afmLib.py @@ -0,0 +1,439 @@ +"""Module for reading and writing AFM (Adobe Font Metrics) files. + +Note that this has been designed to read in AFM files generated by Fontographer +and has not been tested on many other files. In particular, it does not +implement the whole Adobe AFM specification [#f1]_ but, it should read most +"common" AFM files. + +Here is an example of using `afmLib` to read, modify and write an AFM file: + + >>> from fontTools.afmLib import AFM + >>> f = AFM("Tests/afmLib/data/TestAFM.afm") + >>> + >>> # Accessing a pair gets you the kern value + >>> f[("V","A")] + -60 + >>> + >>> # Accessing a glyph name gets you metrics + >>> f["A"] + (65, 668, (8, -25, 660, 666)) + >>> # (charnum, width, bounding box) + >>> + >>> # Accessing an attribute gets you metadata + >>> f.FontName + 'TestFont-Regular' + >>> f.FamilyName + 'TestFont' + >>> f.Weight + 'Regular' + >>> f.XHeight + 500 + >>> f.Ascender + 750 + >>> + >>> # Attributes and items can also be set + >>> f[("A","V")] = -150 # Tighten kerning + >>> f.FontName = "TestFont Squished" + >>> + >>> # And the font written out again (remove the # in front) + >>> #f.write("testfont-squished.afm") + +.. rubric:: Footnotes + +.. [#f1] `Adobe Technote 5004 `_, + Adobe Font Metrics File Format Specification. 
+ +""" + +import re + +# every single line starts with a "word" +identifierRE = re.compile(r"^([A-Za-z]+).*") + +# regular expression to parse char lines +charRE = re.compile( + r"(-?\d+)" # charnum + r"\s*;\s*WX\s+" # ; WX + r"(-?\d+)" # width + r"\s*;\s*N\s+" # ; N + r"([.A-Za-z0-9_]+)" # charname + r"\s*;\s*B\s+" # ; B + r"(-?\d+)" # left + r"\s+" + r"(-?\d+)" # bottom + r"\s+" + r"(-?\d+)" # right + r"\s+" + r"(-?\d+)" # top + r"\s*;\s*" # ; +) + +# regular expression to parse kerning lines +kernRE = re.compile( + r"([.A-Za-z0-9_]+)" # leftchar + r"\s+" + r"([.A-Za-z0-9_]+)" # rightchar + r"\s+" + r"(-?\d+)" # value + r"\s*" +) + +# regular expressions to parse composite info lines of the form: +# Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ; +compositeRE = re.compile( + r"([.A-Za-z0-9_]+)" # char name + r"\s+" + r"(\d+)" # number of parts + r"\s*;\s*" +) +componentRE = re.compile( + r"PCC\s+" # PPC + r"([.A-Za-z0-9_]+)" # base char name + r"\s+" + r"(-?\d+)" # x offset + r"\s+" + r"(-?\d+)" # y offset + r"\s*;\s*" +) + +preferredAttributeOrder = [ + "FontName", + "FullName", + "FamilyName", + "Weight", + "ItalicAngle", + "IsFixedPitch", + "FontBBox", + "UnderlinePosition", + "UnderlineThickness", + "Version", + "Notice", + "EncodingScheme", + "CapHeight", + "XHeight", + "Ascender", + "Descender", +] + + +class error(Exception): + pass + + +class AFM(object): + _attrs = None + + _keywords = [ + "StartFontMetrics", + "EndFontMetrics", + "StartCharMetrics", + "EndCharMetrics", + "StartKernData", + "StartKernPairs", + "EndKernPairs", + "EndKernData", + "StartComposites", + "EndComposites", + ] + + def __init__(self, path=None): + """AFM file reader. + + Instantiating an object with a path name will cause the file to be opened, + read, and parsed. 
Alternatively the path can be left unspecified, and a + file can be parsed later with the :meth:`read` method.""" + self._attrs = {} + self._chars = {} + self._kerning = {} + self._index = {} + self._comments = [] + self._composites = {} + if path is not None: + self.read(path) + + def read(self, path): + """Opens, reads and parses a file.""" + lines = readlines(path) + for line in lines: + if not line.strip(): + continue + m = identifierRE.match(line) + if m is None: + raise error("syntax error in AFM file: " + repr(line)) + + pos = m.regs[1][1] + word = line[:pos] + rest = line[pos:].strip() + if word in self._keywords: + continue + if word == "C": + self.parsechar(rest) + elif word == "KPX": + self.parsekernpair(rest) + elif word == "CC": + self.parsecomposite(rest) + else: + self.parseattr(word, rest) + + def parsechar(self, rest): + m = charRE.match(rest) + if m is None: + raise error("syntax error in AFM file: " + repr(rest)) + things = [] + for fr, to in m.regs[1:]: + things.append(rest[fr:to]) + charname = things[2] + del things[2] + charnum, width, l, b, r, t = (int(thing) for thing in things) + self._chars[charname] = charnum, width, (l, b, r, t) + + def parsekernpair(self, rest): + m = kernRE.match(rest) + if m is None: + raise error("syntax error in AFM file: " + repr(rest)) + things = [] + for fr, to in m.regs[1:]: + things.append(rest[fr:to]) + leftchar, rightchar, value = things + value = int(value) + self._kerning[(leftchar, rightchar)] = value + + def parseattr(self, word, rest): + if word == "FontBBox": + l, b, r, t = [int(thing) for thing in rest.split()] + self._attrs[word] = l, b, r, t + elif word == "Comment": + self._comments.append(rest) + else: + try: + value = int(rest) + except (ValueError, OverflowError): + self._attrs[word] = rest + else: + self._attrs[word] = value + + def parsecomposite(self, rest): + m = compositeRE.match(rest) + if m is None: + raise error("syntax error in AFM file: " + repr(rest)) + charname = m.group(1) + 
ncomponents = int(m.group(2)) + rest = rest[m.regs[0][1] :] + components = [] + while True: + m = componentRE.match(rest) + if m is None: + raise error("syntax error in AFM file: " + repr(rest)) + basechar = m.group(1) + xoffset = int(m.group(2)) + yoffset = int(m.group(3)) + components.append((basechar, xoffset, yoffset)) + rest = rest[m.regs[0][1] :] + if not rest: + break + assert len(components) == ncomponents + self._composites[charname] = components + + def write(self, path, sep="\r"): + """Writes out an AFM font to the given path.""" + import time + + lines = [ + "StartFontMetrics 2.0", + "Comment Generated by afmLib; at %s" + % (time.strftime("%m/%d/%Y %H:%M:%S", time.localtime(time.time()))), + ] + + # write comments, assuming (possibly wrongly!) they should + # all appear at the top + for comment in self._comments: + lines.append("Comment " + comment) + + # write attributes, first the ones we know about, in + # a preferred order + attrs = self._attrs + for attr in preferredAttributeOrder: + if attr in attrs: + value = attrs[attr] + if attr == "FontBBox": + value = "%s %s %s %s" % value + lines.append(attr + " " + str(value)) + # then write the attributes we don't know about, + # in alphabetical order + items = sorted(attrs.items()) + for attr, value in items: + if attr in preferredAttributeOrder: + continue + lines.append(attr + " " + str(value)) + + # write char metrics + lines.append("StartCharMetrics " + repr(len(self._chars))) + items = [ + (charnum, (charname, width, box)) + for charname, (charnum, width, box) in self._chars.items() + ] + + def myKey(a): + """Custom key function to make sure unencoded chars (-1) + end up at the end of the list after sorting.""" + if a[0] == -1: + a = (0xFFFF,) + a[1:] # 0xffff is an arbitrary large number + return a + + items.sort(key=myKey) + + for charnum, (charname, width, (l, b, r, t)) in items: + lines.append( + "C %d ; WX %d ; N %s ; B %d %d %d %d ;" + % (charnum, width, charname, l, b, r, t) + ) + 
lines.append("EndCharMetrics") + + # write kerning info + lines.append("StartKernData") + lines.append("StartKernPairs " + repr(len(self._kerning))) + items = sorted(self._kerning.items()) + for (leftchar, rightchar), value in items: + lines.append("KPX %s %s %d" % (leftchar, rightchar, value)) + lines.append("EndKernPairs") + lines.append("EndKernData") + + if self._composites: + composites = sorted(self._composites.items()) + lines.append("StartComposites %s" % len(self._composites)) + for charname, components in composites: + line = "CC %s %s ;" % (charname, len(components)) + for basechar, xoffset, yoffset in components: + line = line + " PCC %s %s %s ;" % (basechar, xoffset, yoffset) + lines.append(line) + lines.append("EndComposites") + + lines.append("EndFontMetrics") + + writelines(path, lines, sep) + + def has_kernpair(self, pair): + """Returns `True` if the given glyph pair (specified as a tuple) exists + in the kerning dictionary.""" + return pair in self._kerning + + def kernpairs(self): + """Returns a list of all kern pairs in the kerning dictionary.""" + return list(self._kerning.keys()) + + def has_char(self, char): + """Returns `True` if the given glyph exists in the font.""" + return char in self._chars + + def chars(self): + """Returns a list of all glyph names in the font.""" + return list(self._chars.keys()) + + def comments(self): + """Returns all comments from the file.""" + return self._comments + + def addComment(self, comment): + """Adds a new comment to the file.""" + self._comments.append(comment) + + def addComposite(self, glyphName, components): + """Specifies that the glyph `glyphName` is made up of the given components. + The components list should be of the following form:: + + [ + (glyphname, xOffset, yOffset), + ... 
+ ] + + """ + self._composites[glyphName] = components + + def __getattr__(self, attr): + if attr in self._attrs: + return self._attrs[attr] + else: + raise AttributeError(attr) + + def __setattr__(self, attr, value): + # all attrs *not* starting with "_" are consider to be AFM keywords + if attr[:1] == "_": + self.__dict__[attr] = value + else: + self._attrs[attr] = value + + def __delattr__(self, attr): + # all attrs *not* starting with "_" are consider to be AFM keywords + if attr[:1] == "_": + try: + del self.__dict__[attr] + except KeyError: + raise AttributeError(attr) + else: + try: + del self._attrs[attr] + except KeyError: + raise AttributeError(attr) + + def __getitem__(self, key): + if isinstance(key, tuple): + # key is a tuple, return the kernpair + return self._kerning[key] + else: + # return the metrics instead + return self._chars[key] + + def __setitem__(self, key, value): + if isinstance(key, tuple): + # key is a tuple, set kernpair + self._kerning[key] = value + else: + # set char metrics + self._chars[key] = value + + def __delitem__(self, key): + if isinstance(key, tuple): + # key is a tuple, del kernpair + del self._kerning[key] + else: + # del char metrics + del self._chars[key] + + def __repr__(self): + if hasattr(self, "FullName"): + return "" % self.FullName + else: + return "" % id(self) + + +def readlines(path): + with open(path, "r", encoding="ascii") as f: + data = f.read() + return data.splitlines() + + +def writelines(path, lines, sep="\r"): + with open(path, "w", encoding="ascii", newline=sep) as f: + f.write("\n".join(lines) + "\n") + + +if __name__ == "__main__": + import EasyDialogs + + path = EasyDialogs.AskFileForOpen() + if path: + afm = AFM(path) + char = "A" + if afm.has_char(char): + print(afm[char]) # print charnum, width and boundingbox + pair = ("A", "V") + if afm.has_kernpair(pair): + print(afm[pair]) # print kerning value for pair + print(afm.Version) # various other afm entries have become attributes + 
print(afm.Weight) + # afm.comments() returns a list of all Comment lines found in the AFM + print(afm.comments()) + # print afm.chars() + # print afm.kernpairs() + print(afm) + afm.write(path + ".muck") diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/fontBuilder.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/fontBuilder.py new file mode 100644 index 0000000000000000000000000000000000000000..16b7ee167d21f90be779914f958a4e6cbe94391d --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/fontBuilder.py @@ -0,0 +1,989 @@ +__all__ = ["FontBuilder"] + +""" +This module is *experimental*, meaning it still may evolve and change. + +The `FontBuilder` class is a convenient helper to construct working TTF or +OTF fonts from scratch. + +Note that the various setup methods cannot be called in arbitrary order, +due to various interdependencies between OpenType tables. Here is an order +that works: + + fb = FontBuilder(...) + fb.setupGlyphOrder(...) + fb.setupCharacterMap(...) + fb.setupGlyf(...) --or-- fb.setupCFF(...) + fb.setupHorizontalMetrics(...) + fb.setupHorizontalHeader() + fb.setupNameTable(...) + fb.setupOS2() + fb.addOpenTypeFeatures(...) + fb.setupPost() + fb.save(...) 
+ +Here is how to build a minimal TTF: + +```python +from fontTools.fontBuilder import FontBuilder +from fontTools.pens.ttGlyphPen import TTGlyphPen + + +def drawTestGlyph(pen): + pen.moveTo((100, 100)) + pen.lineTo((100, 1000)) + pen.qCurveTo((200, 900), (400, 900), (500, 1000)) + pen.lineTo((500, 100)) + pen.closePath() + + +fb = FontBuilder(1024, isTTF=True) +fb.setupGlyphOrder([".notdef", ".null", "space", "A", "a"]) +fb.setupCharacterMap({32: "space", 65: "A", 97: "a"}) +advanceWidths = {".notdef": 600, "space": 500, "A": 600, "a": 600, ".null": 0} + +familyName = "HelloTestFont" +styleName = "TotallyNormal" +version = "0.1" + +nameStrings = dict( + familyName=dict(en=familyName, nl="HalloTestFont"), + styleName=dict(en=styleName, nl="TotaalNormaal"), + uniqueFontIdentifier="fontBuilder: " + familyName + "." + styleName, + fullName=familyName + "-" + styleName, + psName=familyName + "-" + styleName, + version="Version " + version, +) + +pen = TTGlyphPen(None) +drawTestGlyph(pen) +glyph = pen.glyph() +glyphs = {".notdef": glyph, "space": glyph, "A": glyph, "a": glyph, ".null": glyph} +fb.setupGlyf(glyphs) +metrics = {} +glyphTable = fb.font["glyf"] +for gn, advanceWidth in advanceWidths.items(): + metrics[gn] = (advanceWidth, glyphTable[gn].xMin) +fb.setupHorizontalMetrics(metrics) +fb.setupHorizontalHeader(ascent=824, descent=-200) +fb.setupNameTable(nameStrings) +fb.setupOS2(sTypoAscender=824, usWinAscent=824, usWinDescent=200) +fb.setupPost() +fb.save("test.ttf") +``` + +And here's how to build a minimal OTF: + +```python +from fontTools.fontBuilder import FontBuilder +from fontTools.pens.t2CharStringPen import T2CharStringPen + + +def drawTestGlyph(pen): + pen.moveTo((100, 100)) + pen.lineTo((100, 1000)) + pen.curveTo((200, 900), (400, 900), (500, 1000)) + pen.lineTo((500, 100)) + pen.closePath() + + +fb = FontBuilder(1024, isTTF=False) +fb.setupGlyphOrder([".notdef", ".null", "space", "A", "a"]) +fb.setupCharacterMap({32: "space", 65: "A", 97: "a"}) 
+advanceWidths = {".notdef": 600, "space": 500, "A": 600, "a": 600, ".null": 0} + +familyName = "HelloTestFont" +styleName = "TotallyNormal" +version = "0.1" + +nameStrings = dict( + familyName=dict(en=familyName, nl="HalloTestFont"), + styleName=dict(en=styleName, nl="TotaalNormaal"), + uniqueFontIdentifier="fontBuilder: " + familyName + "." + styleName, + fullName=familyName + "-" + styleName, + psName=familyName + "-" + styleName, + version="Version " + version, +) + +pen = T2CharStringPen(600, None) +drawTestGlyph(pen) +charString = pen.getCharString() +charStrings = { + ".notdef": charString, + "space": charString, + "A": charString, + "a": charString, + ".null": charString, +} +fb.setupCFF(nameStrings["psName"], {"FullName": nameStrings["psName"]}, charStrings, {}) +lsb = {gn: cs.calcBounds(None)[0] for gn, cs in charStrings.items()} +metrics = {} +for gn, advanceWidth in advanceWidths.items(): + metrics[gn] = (advanceWidth, lsb[gn]) +fb.setupHorizontalMetrics(metrics) +fb.setupHorizontalHeader(ascent=824, descent=200) +fb.setupNameTable(nameStrings) +fb.setupOS2(sTypoAscender=824, usWinAscent=824, usWinDescent=200) +fb.setupPost() +fb.save("test.otf") +``` +""" + +from .ttLib import TTFont, newTable +from .ttLib.tables._c_m_a_p import cmap_classes +from .ttLib.tables._g_l_y_f import flagCubic +from .ttLib.tables.O_S_2f_2 import Panose +from .misc.timeTools import timestampNow +import struct +from collections import OrderedDict + + +_headDefaults = dict( + tableVersion=1.0, + fontRevision=1.0, + checkSumAdjustment=0, + magicNumber=0x5F0F3CF5, + flags=0x0003, + unitsPerEm=1000, + created=0, + modified=0, + xMin=0, + yMin=0, + xMax=0, + yMax=0, + macStyle=0, + lowestRecPPEM=3, + fontDirectionHint=2, + indexToLocFormat=0, + glyphDataFormat=0, +) + +_maxpDefaultsTTF = dict( + tableVersion=0x00010000, + numGlyphs=0, + maxPoints=0, + maxContours=0, + maxCompositePoints=0, + maxCompositeContours=0, + maxZones=2, + maxTwilightPoints=0, + maxStorage=0, + 
maxFunctionDefs=0, + maxInstructionDefs=0, + maxStackElements=0, + maxSizeOfInstructions=0, + maxComponentElements=0, + maxComponentDepth=0, +) +_maxpDefaultsOTF = dict( + tableVersion=0x00005000, + numGlyphs=0, +) + +_postDefaults = dict( + formatType=3.0, + italicAngle=0, + underlinePosition=0, + underlineThickness=0, + isFixedPitch=0, + minMemType42=0, + maxMemType42=0, + minMemType1=0, + maxMemType1=0, +) + +_hheaDefaults = dict( + tableVersion=0x00010000, + ascent=0, + descent=0, + lineGap=0, + advanceWidthMax=0, + minLeftSideBearing=0, + minRightSideBearing=0, + xMaxExtent=0, + caretSlopeRise=1, + caretSlopeRun=0, + caretOffset=0, + reserved0=0, + reserved1=0, + reserved2=0, + reserved3=0, + metricDataFormat=0, + numberOfHMetrics=0, +) + +_vheaDefaults = dict( + tableVersion=0x00010000, + ascent=0, + descent=0, + lineGap=0, + advanceHeightMax=0, + minTopSideBearing=0, + minBottomSideBearing=0, + yMaxExtent=0, + caretSlopeRise=0, + caretSlopeRun=0, + reserved0=0, + reserved1=0, + reserved2=0, + reserved3=0, + reserved4=0, + metricDataFormat=0, + numberOfVMetrics=0, +) + +_nameIDs = dict( + copyright=0, + familyName=1, + styleName=2, + uniqueFontIdentifier=3, + fullName=4, + version=5, + psName=6, + trademark=7, + manufacturer=8, + designer=9, + description=10, + vendorURL=11, + designerURL=12, + licenseDescription=13, + licenseInfoURL=14, + # reserved = 15, + typographicFamily=16, + typographicSubfamily=17, + compatibleFullName=18, + sampleText=19, + postScriptCIDFindfontName=20, + wwsFamilyName=21, + wwsSubfamilyName=22, + lightBackgroundPalette=23, + darkBackgroundPalette=24, + variationsPostScriptNamePrefix=25, +) + +# to insert in setupNameTable doc string: +# print("\n".join(("%s (nameID %s)" % (k, v)) for k, v in sorted(_nameIDs.items(), key=lambda x: x[1]))) + +_panoseDefaults = Panose() + +_OS2Defaults = dict( + version=3, + xAvgCharWidth=0, + usWeightClass=400, + usWidthClass=5, + fsType=0x0004, # default: Preview & Print embedding + 
ySubscriptXSize=0, + ySubscriptYSize=0, + ySubscriptXOffset=0, + ySubscriptYOffset=0, + ySuperscriptXSize=0, + ySuperscriptYSize=0, + ySuperscriptXOffset=0, + ySuperscriptYOffset=0, + yStrikeoutSize=0, + yStrikeoutPosition=0, + sFamilyClass=0, + panose=_panoseDefaults, + ulUnicodeRange1=0, + ulUnicodeRange2=0, + ulUnicodeRange3=0, + ulUnicodeRange4=0, + achVendID="????", + fsSelection=0, + usFirstCharIndex=0, + usLastCharIndex=0, + sTypoAscender=0, + sTypoDescender=0, + sTypoLineGap=0, + usWinAscent=0, + usWinDescent=0, + ulCodePageRange1=0, + ulCodePageRange2=0, + sxHeight=0, + sCapHeight=0, + usDefaultChar=0, # .notdef + usBreakChar=32, # space + usMaxContext=0, + usLowerOpticalPointSize=0, + usUpperOpticalPointSize=0, +) + + +class FontBuilder(object): + def __init__(self, unitsPerEm=None, font=None, isTTF=True, glyphDataFormat=0): + """Initialize a FontBuilder instance. + + If the `font` argument is not given, a new `TTFont` will be + constructed, and `unitsPerEm` must be given. If `isTTF` is True, + the font will be a glyf-based TTF; if `isTTF` is False it will be + a CFF-based OTF. + + The `glyphDataFormat` argument corresponds to the `head` table field + that defines the format of the TrueType `glyf` table (default=0). + TrueType glyphs historically can only contain quadratic splines and static + components, but there's a proposal to add support for cubic Bezier curves as well + as variable composites/components at + https://github.com/harfbuzz/boring-expansion-spec/blob/main/glyf1.md + You can experiment with the new features by setting `glyphDataFormat` to 1. + A ValueError is raised if `glyphDataFormat` is left at 0 but glyphs are added + that contain cubic splines or varcomposites. This is to prevent accidentally + creating fonts that are incompatible with existing TrueType implementations. + + If `font` is given, it must be a `TTFont` instance and `unitsPerEm` + must _not_ be given. The `isTTF` and `glyphDataFormat` arguments will be ignored. 
+ """ + if font is None: + self.font = TTFont(recalcTimestamp=False) + self.isTTF = isTTF + now = timestampNow() + assert unitsPerEm is not None + self.setupHead( + unitsPerEm=unitsPerEm, + created=now, + modified=now, + glyphDataFormat=glyphDataFormat, + ) + self.setupMaxp() + else: + assert unitsPerEm is None + self.font = font + self.isTTF = "glyf" in font + + def save(self, file): + """Save the font. The 'file' argument can be either a pathname or a + writable file object. + """ + self.font.save(file) + + def _initTableWithValues(self, tableTag, defaults, values): + table = self.font[tableTag] = newTable(tableTag) + for k, v in defaults.items(): + setattr(table, k, v) + for k, v in values.items(): + setattr(table, k, v) + return table + + def _updateTableWithValues(self, tableTag, values): + table = self.font[tableTag] + for k, v in values.items(): + setattr(table, k, v) + + def setupHead(self, **values): + """Create a new `head` table and initialize it with default values, + which can be overridden by keyword arguments. + """ + self._initTableWithValues("head", _headDefaults, values) + + def updateHead(self, **values): + """Update the head table with the fields and values passed as + keyword arguments. + """ + self._updateTableWithValues("head", values) + + def setupGlyphOrder(self, glyphOrder): + """Set the glyph order for the font.""" + self.font.setGlyphOrder(glyphOrder) + + def setupCharacterMap(self, cmapping, uvs=None, allowFallback=False): + """Build the `cmap` table for the font. The `cmapping` argument should + be a dict mapping unicode code points as integers to glyph names. + + The `uvs` argument, when passed, must be a list of tuples, describing + Unicode Variation Sequences. These tuples have three elements: + (unicodeValue, variationSelector, glyphName) + `unicodeValue` and `variationSelector` are integer code points. + `glyphName` may be None, to indicate this is the default variation. 
+ Text processors will then use the cmap to find the glyph name. + Each Unicode Variation Sequence should be an officially supported + sequence, but this is not policed. + """ + subTables = [] + highestUnicode = max(cmapping) if cmapping else 0 + if highestUnicode > 0xFFFF: + cmapping_3_1 = dict((k, v) for k, v in cmapping.items() if k < 0x10000) + subTable_3_10 = buildCmapSubTable(cmapping, 12, 3, 10) + subTables.append(subTable_3_10) + else: + cmapping_3_1 = cmapping + format = 4 + subTable_3_1 = buildCmapSubTable(cmapping_3_1, format, 3, 1) + try: + subTable_3_1.compile(self.font) + except struct.error: + # format 4 overflowed, fall back to format 12 + if not allowFallback: + raise ValueError( + "cmap format 4 subtable overflowed; sort glyph order by unicode to fix." + ) + format = 12 + subTable_3_1 = buildCmapSubTable(cmapping_3_1, format, 3, 1) + subTables.append(subTable_3_1) + subTable_0_3 = buildCmapSubTable(cmapping_3_1, format, 0, 3) + subTables.append(subTable_0_3) + + if uvs is not None: + uvsDict = {} + for unicodeValue, variationSelector, glyphName in uvs: + if cmapping.get(unicodeValue) == glyphName: + # this is a default variation + glyphName = None + if variationSelector not in uvsDict: + uvsDict[variationSelector] = [] + uvsDict[variationSelector].append((unicodeValue, glyphName)) + uvsSubTable = buildCmapSubTable({}, 14, 0, 5) + uvsSubTable.uvsDict = uvsDict + subTables.append(uvsSubTable) + + self.font["cmap"] = newTable("cmap") + self.font["cmap"].tableVersion = 0 + self.font["cmap"].tables = subTables + + def setupNameTable(self, nameStrings, windows=True, mac=True): + """Create the `name` table for the font. The `nameStrings` argument must + be a dict, mapping nameIDs or descriptive names for the nameIDs to name + record values. A value is either a string, or a dict, mapping language codes + to strings, to allow localized name table entries. 
+ + By default, both Windows (platformID=3) and Macintosh (platformID=1) name + records are added, unless any of `windows` or `mac` arguments is False. + + The following descriptive names are available for nameIDs: + + copyright (nameID 0) + familyName (nameID 1) + styleName (nameID 2) + uniqueFontIdentifier (nameID 3) + fullName (nameID 4) + version (nameID 5) + psName (nameID 6) + trademark (nameID 7) + manufacturer (nameID 8) + designer (nameID 9) + description (nameID 10) + vendorURL (nameID 11) + designerURL (nameID 12) + licenseDescription (nameID 13) + licenseInfoURL (nameID 14) + typographicFamily (nameID 16) + typographicSubfamily (nameID 17) + compatibleFullName (nameID 18) + sampleText (nameID 19) + postScriptCIDFindfontName (nameID 20) + wwsFamilyName (nameID 21) + wwsSubfamilyName (nameID 22) + lightBackgroundPalette (nameID 23) + darkBackgroundPalette (nameID 24) + variationsPostScriptNamePrefix (nameID 25) + """ + nameTable = self.font["name"] = newTable("name") + nameTable.names = [] + + for nameName, nameValue in nameStrings.items(): + if isinstance(nameName, int): + nameID = nameName + else: + nameID = _nameIDs[nameName] + if isinstance(nameValue, str): + nameValue = dict(en=nameValue) + nameTable.addMultilingualName( + nameValue, ttFont=self.font, nameID=nameID, windows=windows, mac=mac + ) + + def setupOS2(self, **values): + """Create a new `OS/2` table and initialize it with default values, + which can be overridden by keyword arguments. 
+ """ + self._initTableWithValues("OS/2", _OS2Defaults, values) + if "xAvgCharWidth" not in values: + assert ( + "hmtx" in self.font + ), "the 'hmtx' table must be setup before the 'OS/2' table" + self.font["OS/2"].recalcAvgCharWidth(self.font) + if not ( + "ulUnicodeRange1" in values + or "ulUnicodeRange2" in values + or "ulUnicodeRange3" in values + or "ulUnicodeRange3" in values + ): + assert ( + "cmap" in self.font + ), "the 'cmap' table must be setup before the 'OS/2' table" + self.font["OS/2"].recalcUnicodeRanges(self.font) + + def setupCFF(self, psName, fontInfo, charStringsDict, privateDict): + from .cffLib import ( + CFFFontSet, + TopDictIndex, + TopDict, + CharStrings, + GlobalSubrsIndex, + PrivateDict, + ) + + assert not self.isTTF + self.font.sfntVersion = "OTTO" + fontSet = CFFFontSet() + fontSet.major = 1 + fontSet.minor = 0 + fontSet.otFont = self.font + fontSet.fontNames = [psName] + fontSet.topDictIndex = TopDictIndex() + + globalSubrs = GlobalSubrsIndex() + fontSet.GlobalSubrs = globalSubrs + private = PrivateDict() + for key, value in privateDict.items(): + setattr(private, key, value) + fdSelect = None + fdArray = None + + topDict = TopDict() + topDict.charset = self.font.getGlyphOrder() + topDict.Private = private + topDict.GlobalSubrs = fontSet.GlobalSubrs + for key, value in fontInfo.items(): + setattr(topDict, key, value) + if "FontMatrix" not in fontInfo: + scale = 1 / self.font["head"].unitsPerEm + topDict.FontMatrix = [scale, 0, 0, scale, 0, 0] + + charStrings = CharStrings( + None, topDict.charset, globalSubrs, private, fdSelect, fdArray + ) + for glyphName, charString in charStringsDict.items(): + charString.private = private + charString.globalSubrs = globalSubrs + charStrings[glyphName] = charString + topDict.CharStrings = charStrings + + fontSet.topDictIndex.append(topDict) + + self.font["CFF "] = newTable("CFF ") + self.font["CFF "].cff = fontSet + + def setupCFF2(self, charStringsDict, fdArrayList=None, regions=None): + from 
.cffLib import ( + CFFFontSet, + TopDictIndex, + TopDict, + CharStrings, + GlobalSubrsIndex, + PrivateDict, + FDArrayIndex, + FontDict, + ) + + assert not self.isTTF + self.font.sfntVersion = "OTTO" + fontSet = CFFFontSet() + fontSet.major = 2 + fontSet.minor = 0 + + cff2GetGlyphOrder = self.font.getGlyphOrder + fontSet.topDictIndex = TopDictIndex(None, cff2GetGlyphOrder, None) + + globalSubrs = GlobalSubrsIndex() + fontSet.GlobalSubrs = globalSubrs + + if fdArrayList is None: + fdArrayList = [{}] + fdSelect = None + fdArray = FDArrayIndex() + fdArray.strings = None + fdArray.GlobalSubrs = globalSubrs + for privateDict in fdArrayList: + fontDict = FontDict() + fontDict.setCFF2(True) + private = PrivateDict() + for key, value in privateDict.items(): + setattr(private, key, value) + fontDict.Private = private + fdArray.append(fontDict) + + topDict = TopDict() + topDict.cff2GetGlyphOrder = cff2GetGlyphOrder + topDict.FDArray = fdArray + scale = 1 / self.font["head"].unitsPerEm + topDict.FontMatrix = [scale, 0, 0, scale, 0, 0] + + private = fdArray[0].Private + charStrings = CharStrings(None, None, globalSubrs, private, fdSelect, fdArray) + for glyphName, charString in charStringsDict.items(): + charString.private = private + charString.globalSubrs = globalSubrs + charStrings[glyphName] = charString + topDict.CharStrings = charStrings + + fontSet.topDictIndex.append(topDict) + + self.font["CFF2"] = newTable("CFF2") + self.font["CFF2"].cff = fontSet + + if regions: + self.setupCFF2Regions(regions) + + def setupCFF2Regions(self, regions): + from .varLib.builder import buildVarRegionList, buildVarData, buildVarStore + from .cffLib import VarStoreData + + assert "fvar" in self.font, "fvar must to be set up first" + assert "CFF2" in self.font, "CFF2 must to be set up first" + axisTags = [a.axisTag for a in self.font["fvar"].axes] + varRegionList = buildVarRegionList(regions, axisTags) + varData = buildVarData(list(range(len(regions))), None, optimize=False) + varStore = 
buildVarStore(varRegionList, [varData]) + vstore = VarStoreData(otVarStore=varStore) + topDict = self.font["CFF2"].cff.topDictIndex[0] + topDict.VarStore = vstore + for fontDict in topDict.FDArray: + fontDict.Private.vstore = vstore + + def setupGlyf(self, glyphs, calcGlyphBounds=True, validateGlyphFormat=True): + """Create the `glyf` table from a dict, that maps glyph names + to `fontTools.ttLib.tables._g_l_y_f.Glyph` objects, for example + as made by `fontTools.pens.ttGlyphPen.TTGlyphPen`. + + If `calcGlyphBounds` is True, the bounds of all glyphs will be + calculated. Only pass False if your glyph objects already have + their bounding box values set. + + If `validateGlyphFormat` is True, raise ValueError if any of the glyphs contains + cubic curves or is a variable composite but head.glyphDataFormat=0. + Set it to False to skip the check if you know in advance all the glyphs are + compatible with the specified glyphDataFormat. + """ + assert self.isTTF + + if validateGlyphFormat and self.font["head"].glyphDataFormat == 0: + for name, g in glyphs.items(): + if g.numberOfContours > 0 and any(f & flagCubic for f in g.flags): + raise ValueError( + f"Glyph {name!r} has cubic Bezier outlines, but glyphDataFormat=0; " + "either convert to quadratics with cu2qu or set glyphDataFormat=1." + ) + + self.font["loca"] = newTable("loca") + self.font["glyf"] = newTable("glyf") + self.font["glyf"].glyphs = glyphs + if hasattr(self.font, "glyphOrder"): + self.font["glyf"].glyphOrder = self.font.glyphOrder + if calcGlyphBounds: + self.calcGlyphBounds() + + def setupFvar(self, axes, instances): + """Adds an font variations table to the font. + + Args: + axes (list): See below. + instances (list): See below. + + ``axes`` should be a list of axes, with each axis either supplied as + a py:class:`.designspaceLib.AxisDescriptor` object, or a tuple in the + format ```tupletag, minValue, defaultValue, maxValue, name``. 
+ The ``name`` is either a string, or a dict, mapping language codes + to strings, to allow localized name table entries. + + ```instances`` should be a list of instances, with each instance either + supplied as a py:class:`.designspaceLib.InstanceDescriptor` object, or a + dict with keys ``location`` (mapping of axis tags to float values), + ``stylename`` and (optionally) ``postscriptfontname``. + The ``stylename`` is either a string, or a dict, mapping language codes + to strings, to allow localized name table entries. + """ + + addFvar(self.font, axes, instances) + + def setupAvar(self, axes, mappings=None): + """Adds an axis variations table to the font. + + Args: + axes (list): A list of py:class:`.designspaceLib.AxisDescriptor` objects. + """ + from .varLib import _add_avar + + if "fvar" not in self.font: + raise KeyError("'fvar' table is missing; can't add 'avar'.") + + axisTags = [axis.axisTag for axis in self.font["fvar"].axes] + axes = OrderedDict(enumerate(axes)) # Only values are used + _add_avar(self.font, axes, mappings, axisTags) + + def setupGvar(self, variations): + gvar = self.font["gvar"] = newTable("gvar") + gvar.version = 1 + gvar.reserved = 0 + gvar.variations = variations + + def calcGlyphBounds(self): + """Calculate the bounding boxes of all glyphs in the `glyf` table. + This is usually not called explicitly by client code. + """ + glyphTable = self.font["glyf"] + for glyph in glyphTable.glyphs.values(): + glyph.recalcBounds(glyphTable) + + def setupHorizontalMetrics(self, metrics): + """Create a new `hmtx` table, for horizontal metrics. + + The `metrics` argument must be a dict, mapping glyph names to + `(width, leftSidebearing)` tuples. + """ + self.setupMetrics("hmtx", metrics) + + def setupVerticalMetrics(self, metrics): + """Create a new `vmtx` table, for horizontal metrics. + + The `metrics` argument must be a dict, mapping glyph names to + `(height, topSidebearing)` tuples. 
+ """ + self.setupMetrics("vmtx", metrics) + + def setupMetrics(self, tableTag, metrics): + """See `setupHorizontalMetrics()` and `setupVerticalMetrics()`.""" + assert tableTag in ("hmtx", "vmtx") + mtxTable = self.font[tableTag] = newTable(tableTag) + roundedMetrics = {} + for gn in metrics: + w, lsb = metrics[gn] + roundedMetrics[gn] = int(round(w)), int(round(lsb)) + mtxTable.metrics = roundedMetrics + + def setupHorizontalHeader(self, **values): + """Create a new `hhea` table initialize it with default values, + which can be overridden by keyword arguments. + """ + self._initTableWithValues("hhea", _hheaDefaults, values) + + def setupVerticalHeader(self, **values): + """Create a new `vhea` table initialize it with default values, + which can be overridden by keyword arguments. + """ + self._initTableWithValues("vhea", _vheaDefaults, values) + + def setupVerticalOrigins(self, verticalOrigins, defaultVerticalOrigin=None): + """Create a new `VORG` table. The `verticalOrigins` argument must be + a dict, mapping glyph names to vertical origin values. + + The `defaultVerticalOrigin` argument should be the most common vertical + origin value. If omitted, this value will be derived from the actual + values in the `verticalOrigins` argument. 
+ """ + if defaultVerticalOrigin is None: + # find the most frequent vorg value + bag = {} + for gn in verticalOrigins: + vorg = verticalOrigins[gn] + if vorg not in bag: + bag[vorg] = 1 + else: + bag[vorg] += 1 + defaultVerticalOrigin = sorted( + bag, key=lambda vorg: bag[vorg], reverse=True + )[0] + self._initTableWithValues( + "VORG", + {}, + dict(VOriginRecords={}, defaultVertOriginY=defaultVerticalOrigin), + ) + vorgTable = self.font["VORG"] + vorgTable.majorVersion = 1 + vorgTable.minorVersion = 0 + for gn in verticalOrigins: + vorgTable[gn] = verticalOrigins[gn] + + def setupPost(self, keepGlyphNames=True, **values): + """Create a new `post` table and initialize it with default values, + which can be overridden by keyword arguments. + """ + isCFF2 = "CFF2" in self.font + postTable = self._initTableWithValues("post", _postDefaults, values) + if (self.isTTF or isCFF2) and keepGlyphNames: + postTable.formatType = 2.0 + postTable.extraNames = [] + postTable.mapping = {} + else: + postTable.formatType = 3.0 + + def setupMaxp(self): + """Create a new `maxp` table. This is called implicitly by FontBuilder + itself and is usually not called by client code. + """ + if self.isTTF: + defaults = _maxpDefaultsTTF + else: + defaults = _maxpDefaultsOTF + self._initTableWithValues("maxp", defaults, {}) + + def setupDummyDSIG(self): + """This adds an empty DSIG table to the font to make some MS applications + happy. This does not properly sign the font. + """ + values = dict( + ulVersion=1, + usFlag=0, + usNumSigs=0, + signatureRecords=[], + ) + self._initTableWithValues("DSIG", {}, values) + + def addOpenTypeFeatures(self, features, filename=None, tables=None, debug=False): + """Add OpenType features to the font from a string containing + Feature File syntax. + + The `filename` argument is used in error messages and to determine + where to look for "include" files. 
+ + The optional `tables` argument can be a list of OTL tables tags to + build, allowing the caller to only build selected OTL tables. See + `fontTools.feaLib` for details. + + The optional `debug` argument controls whether to add source debugging + information to the font in the `Debg` table. + """ + from .feaLib.builder import addOpenTypeFeaturesFromString + + addOpenTypeFeaturesFromString( + self.font, features, filename=filename, tables=tables, debug=debug + ) + + def addFeatureVariations(self, conditionalSubstitutions, featureTag="rvrn"): + """Add conditional substitutions to a Variable Font. + + See `fontTools.varLib.featureVars.addFeatureVariations`. + """ + from .varLib import featureVars + + if "fvar" not in self.font: + raise KeyError("'fvar' table is missing; can't add FeatureVariations.") + + featureVars.addFeatureVariations( + self.font, conditionalSubstitutions, featureTag=featureTag + ) + + def setupCOLR( + self, + colorLayers, + version=None, + varStore=None, + varIndexMap=None, + clipBoxes=None, + allowLayerReuse=True, + ): + """Build new COLR table using color layers dictionary. + + Cf. `fontTools.colorLib.builder.buildCOLR`. + """ + from fontTools.colorLib.builder import buildCOLR + + glyphMap = self.font.getReverseGlyphMap() + self.font["COLR"] = buildCOLR( + colorLayers, + version=version, + glyphMap=glyphMap, + varStore=varStore, + varIndexMap=varIndexMap, + clipBoxes=clipBoxes, + allowLayerReuse=allowLayerReuse, + ) + + def setupCPAL( + self, + palettes, + paletteTypes=None, + paletteLabels=None, + paletteEntryLabels=None, + ): + """Build new CPAL table using list of palettes. + + Optionally build CPAL v1 table using paletteTypes, paletteLabels and + paletteEntryLabels. + + Cf. `fontTools.colorLib.builder.buildCPAL`. 
+ """ + from fontTools.colorLib.builder import buildCPAL + + self.font["CPAL"] = buildCPAL( + palettes, + paletteTypes=paletteTypes, + paletteLabels=paletteLabels, + paletteEntryLabels=paletteEntryLabels, + nameTable=self.font.get("name"), + ) + + def setupStat(self, axes, locations=None, elidedFallbackName=2): + """Build a new 'STAT' table. + + See `fontTools.otlLib.builder.buildStatTable` for details about + the arguments. + """ + from .otlLib.builder import buildStatTable + + buildStatTable(self.font, axes, locations, elidedFallbackName) + + +def buildCmapSubTable(cmapping, format, platformID, platEncID): + subTable = cmap_classes[format](format) + subTable.cmap = cmapping + subTable.platformID = platformID + subTable.platEncID = platEncID + subTable.language = 0 + return subTable + + +def addFvar(font, axes, instances): + from .ttLib.tables._f_v_a_r import Axis, NamedInstance + + assert axes + + fvar = newTable("fvar") + nameTable = font["name"] + + for axis_def in axes: + axis = Axis() + + if isinstance(axis_def, tuple): + ( + axis.axisTag, + axis.minValue, + axis.defaultValue, + axis.maxValue, + name, + ) = axis_def + else: + (axis.axisTag, axis.minValue, axis.defaultValue, axis.maxValue, name) = ( + axis_def.tag, + axis_def.minimum, + axis_def.default, + axis_def.maximum, + axis_def.name, + ) + if axis_def.hidden: + axis.flags = 0x0001 # HIDDEN_AXIS + + if isinstance(name, str): + name = dict(en=name) + + axis.axisNameID = nameTable.addMultilingualName(name, ttFont=font) + fvar.axes.append(axis) + + for instance in instances: + if isinstance(instance, dict): + coordinates = instance["location"] + name = instance["stylename"] + psname = instance.get("postscriptfontname") + else: + coordinates = instance.location + name = instance.localisedStyleName or instance.styleName + psname = instance.postScriptFontName + + if isinstance(name, str): + name = dict(en=name) + + inst = NamedInstance() + inst.subfamilyNameID = nameTable.addMultilingualName(name, ttFont=font) 
+ if psname is not None: + inst.postscriptNameID = nameTable.addName(psname) + inst.coordinates = coordinates + fvar.instances.append(inst) + + font["fvar"] = fvar diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90f1efbbea7fe1d385d5c4412ae959f0dc9e1690 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/__pycache__/__main__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2aa4b1abc30cbf7ce7c61435954d4699c2e00e2b Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/__pycache__/__main__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/__pycache__/removeOverlaps.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/__pycache__/removeOverlaps.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05fa85dec5795b25b92f8f7f6196b9c9ddc675e0 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/__pycache__/removeOverlaps.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/__pycache__/reorderGlyphs.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/__pycache__/reorderGlyphs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba7db30d426a939e7499a6e2e18b2e96f7c6fa47 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/__pycache__/reorderGlyphs.cpython-310.pyc differ diff 
--git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/__pycache__/sfnt.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/__pycache__/sfnt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0dac61ce6e876868102531e2d47f31aaac53359c Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/__pycache__/sfnt.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/macUtils.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/macUtils.py new file mode 100644 index 0000000000000000000000000000000000000000..0959a6fc2776ff4b7ff968031191aa05c6ec50a4 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/macUtils.py @@ -0,0 +1,54 @@ +"""ttLib.macUtils.py -- Various Mac-specific stuff.""" + +from io import BytesIO +from fontTools.misc.macRes import ResourceReader, ResourceError + + +def getSFNTResIndices(path): + """Determine whether a file has a 'sfnt' resource fork or not.""" + try: + reader = ResourceReader(path) + indices = reader.getIndices("sfnt") + reader.close() + return indices + except ResourceError: + return [] + + +def openTTFonts(path): + """Given a pathname, return a list of TTFont objects. In the case + of a flat TTF/OTF file, the list will contain just one font object; + but in the case of a Mac font suitcase it will contain as many + font objects as there are sfnt resources in the file. 
+ """ + from fontTools import ttLib + + fonts = [] + sfnts = getSFNTResIndices(path) + if not sfnts: + fonts.append(ttLib.TTFont(path)) + else: + for index in sfnts: + fonts.append(ttLib.TTFont(path, index)) + if not fonts: + raise ttLib.TTLibError("no fonts found in file '%s'" % path) + return fonts + + +class SFNTResourceReader(BytesIO): + """Simple read-only file wrapper for 'sfnt' resources.""" + + def __init__(self, path, res_name_or_index): + from fontTools import ttLib + + reader = ResourceReader(path) + if isinstance(res_name_or_index, str): + rsrc = reader.getNamedResource("sfnt", res_name_or_index) + else: + rsrc = reader.getIndResource("sfnt", res_name_or_index) + if rsrc is None: + raise ttLib.TTLibError("sfnt resource not found: %s" % res_name_or_index) + reader.close() + self.rsrc = rsrc + super(SFNTResourceReader, self).__init__(rsrc.data) + self.name = path diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/reorderGlyphs.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/reorderGlyphs.py new file mode 100644 index 0000000000000000000000000000000000000000..3221261f16a482b2a5dbc72ec47a184b6f7befa5 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/reorderGlyphs.py @@ -0,0 +1,278 @@ +"""Reorder glyphs in a font.""" + +__author__ = "Rod Sheeter" + +# See https://docs.google.com/document/d/1h9O-C_ndods87uY0QeIIcgAMiX2gDTpvO_IhMJsKAqs/ +# for details. 
from fontTools import ttLib
from fontTools.ttLib.tables import otBase
from fontTools.ttLib.tables import otTables as ot
from abc import ABC, abstractmethod
from dataclasses import dataclass
from collections import deque
from typing import (
    Optional,
    Any,
    Callable,
    Deque,
    Iterable,
    List,
    NamedTuple,
    Tuple,
    Union,
)


_COVERAGE_ATTR = "Coverage"  # tables that have one coverage use this name


def _sort_by_gid(
    get_glyph_id: Callable[[str], int],
    glyphs: List[str],
    parallel_list: Optional[List[Any]],
):
    """Sort *glyphs* in place by glyph id, keeping *parallel_list* aligned.

    When *parallel_list* is given it must run parallel to *glyphs*; each of
    its items is moved together with the corresponding glyph name.
    """
    if parallel_list:
        pairs = sorted(
            zip(glyphs, parallel_list), key=lambda pair: get_glyph_id(pair[0])
        )
        glyphs[:] = [glyph for glyph, _ in pairs]
        parallel_list[:] = [item for _, item in pairs]
    else:
        glyphs.sort(key=get_glyph_id)


def _get_dotted_attr(value: Any, dotted_attr: str) -> Any:
    """Resolve a dotted attribute path (e.g. "MarkArray.MarkRecord") on *value*."""
    # str.split always yields at least one component, so the loop runs at
    # least once for any input string.
    for attr_name in dotted_attr.split("."):
        value = getattr(value, attr_name)
    return value


class ReorderRule(ABC):
    """A rule to reorder something in a font to match the font's glyph order."""

    @abstractmethod
    def apply(self, font: ttLib.TTFont, value: otBase.BaseTable) -> None: ...
@dataclass(frozen=True)
class ReorderCoverage(ReorderRule):
    """Reorder a Coverage table, and optionally a list that is sorted parallel to it."""

    # Dotted attribute path of a list that runs parallel to the Coverage
    parallel_list_attr: Optional[str] = None
    coverage_attr: str = _COVERAGE_ATTR

    def apply(self, font: ttLib.TTFont, value: otBase.BaseTable) -> None:
        coverage = _get_dotted_attr(value, self.coverage_attr)

        if type(coverage) is list:
            # A few tables carry a *list* of Coverage tables; a parallel
            # list cannot exist in that case.
            assert (
                not self.parallel_list_attr
            ), f"Can't have multiple coverage AND a parallel list; {self}"
            for coverage_entry in coverage:
                _sort_by_gid(font.getGlyphID, coverage_entry.glyphs, None)
            return

        # Normal path: one Coverage that might have a parallel list.
        parallel_list = None
        if self.parallel_list_attr:
            parallel_list = _get_dotted_attr(value, self.parallel_list_attr)
            assert (
                type(parallel_list) is list
            ), f"{self.parallel_list_attr} should be a list"
            assert len(parallel_list) == len(coverage.glyphs), "Nothing makes sense"

        _sort_by_gid(font.getGlyphID, coverage.glyphs, parallel_list)


@dataclass(frozen=True)
class ReorderList(ReorderRule):
    """Reorder the items within a list to match the updated glyph order.

    Useful when a list ordered by coverage itself contains something ordered
    by a gid. For example, the PairSet table of
    https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#lookup-type-2-pair-adjustment-positioning-subtable.
    """

    list_attr: str
    key: str

    def apply(self, font: ttLib.TTFont, value: otBase.BaseTable) -> None:
        items = _get_dotted_attr(value, self.list_attr)
        assert isinstance(items, list), f"{self.list_attr} should be a list"
        items.sort(key=lambda item: font.getGlyphID(getattr(item, self.key)))


# (Type, Optional Format) => List[ReorderRule]
# Encodes the relationships Cosimo identified
_REORDER_RULES = {
    # GPOS
    (ot.SinglePos, 1): [ReorderCoverage()],
    (ot.SinglePos, 2): [ReorderCoverage(parallel_list_attr="Value")],
    (ot.PairPos, 1): [ReorderCoverage(parallel_list_attr="PairSet")],
    (ot.PairSet, None): [ReorderList("PairValueRecord", key="SecondGlyph")],
    (ot.PairPos, 2): [ReorderCoverage()],
    (ot.CursivePos, 1): [ReorderCoverage(parallel_list_attr="EntryExitRecord")],
    (ot.MarkBasePos, 1): [
        ReorderCoverage(
            coverage_attr="MarkCoverage", parallel_list_attr="MarkArray.MarkRecord"
        ),
        ReorderCoverage(
            coverage_attr="BaseCoverage", parallel_list_attr="BaseArray.BaseRecord"
        ),
    ],
    (ot.MarkLigPos, 1): [
        ReorderCoverage(
            coverage_attr="MarkCoverage", parallel_list_attr="MarkArray.MarkRecord"
        ),
        ReorderCoverage(
            coverage_attr="LigatureCoverage",
            parallel_list_attr="LigatureArray.LigatureAttach",
        ),
    ],
    (ot.MarkMarkPos, 1): [
        ReorderCoverage(
            coverage_attr="Mark1Coverage", parallel_list_attr="Mark1Array.MarkRecord"
        ),
        ReorderCoverage(
            coverage_attr="Mark2Coverage", parallel_list_attr="Mark2Array.Mark2Record"
        ),
    ],
    (ot.ContextPos, 1): [ReorderCoverage(parallel_list_attr="PosRuleSet")],
    (ot.ContextPos, 2): [ReorderCoverage()],
    (ot.ContextPos, 3): [ReorderCoverage()],
    (ot.ChainContextPos, 1): [ReorderCoverage(parallel_list_attr="ChainPosRuleSet")],
    (ot.ChainContextPos, 2): [ReorderCoverage()],
    (ot.ChainContextPos, 3): [
        ReorderCoverage(coverage_attr="BacktrackCoverage"),
        ReorderCoverage(coverage_attr="InputCoverage"),
        ReorderCoverage(coverage_attr="LookAheadCoverage"),
    ],
    # GSUB
    (ot.ContextSubst, 1): [ReorderCoverage(parallel_list_attr="SubRuleSet")],
    (ot.ContextSubst, 2): [ReorderCoverage()],
    (ot.ContextSubst, 3): [ReorderCoverage()],
    (ot.ChainContextSubst, 1): [ReorderCoverage(parallel_list_attr="ChainSubRuleSet")],
    (ot.ChainContextSubst, 2): [ReorderCoverage()],
    (ot.ChainContextSubst, 3): [
        ReorderCoverage(coverage_attr="BacktrackCoverage"),
        ReorderCoverage(coverage_attr="InputCoverage"),
        ReorderCoverage(coverage_attr="LookAheadCoverage"),
    ],
    (ot.ReverseChainSingleSubst, 1): [
        ReorderCoverage(parallel_list_attr="Substitute"),
        ReorderCoverage(coverage_attr="BacktrackCoverage"),
        ReorderCoverage(coverage_attr="LookAheadCoverage"),
    ],
    # GDEF
    (ot.AttachList, None): [ReorderCoverage(parallel_list_attr="AttachPoint")],
    (ot.LigCaretList, None): [ReorderCoverage(parallel_list_attr="LigGlyph")],
    (ot.MarkGlyphSetsDef, None): [ReorderCoverage()],
    # MATH
    (ot.MathGlyphInfo, None): [ReorderCoverage(coverage_attr="ExtendedShapeCoverage")],
    (ot.MathItalicsCorrectionInfo, None): [
        ReorderCoverage(parallel_list_attr="ItalicsCorrection")
    ],
    (ot.MathTopAccentAttachment, None): [
        ReorderCoverage(
            coverage_attr="TopAccentCoverage", parallel_list_attr="TopAccentAttachment"
        )
    ],
    (ot.MathKernInfo, None): [
        ReorderCoverage(
            coverage_attr="MathKernCoverage", parallel_list_attr="MathKernInfoRecords"
        )
    ],
    (ot.MathVariants, None): [
        ReorderCoverage(
            coverage_attr="VertGlyphCoverage",
            parallel_list_attr="VertGlyphConstruction",
        ),
        ReorderCoverage(
            coverage_attr="HorizGlyphCoverage",
            parallel_list_attr="HorizGlyphConstruction",
        ),
    ],
}


# TODO Port to otTraverse

SubTablePath = Tuple[otBase.BaseTable.SubTableEntry, ...]
+ + +def _bfs_base_table( + root: otBase.BaseTable, root_accessor: str +) -> Iterable[SubTablePath]: + yield from _traverse_ot_data( + root, root_accessor, lambda frontier, new: frontier.extend(new) + ) + + +# Given f(current frontier, new entries) add new entries to frontier +AddToFrontierFn = Callable[[Deque[SubTablePath], List[SubTablePath]], None] + + +def _traverse_ot_data( + root: otBase.BaseTable, root_accessor: str, add_to_frontier_fn: AddToFrontierFn +) -> Iterable[SubTablePath]: + # no visited because general otData is forward-offset only and thus cannot cycle + + frontier: Deque[SubTablePath] = deque() + frontier.append((otBase.BaseTable.SubTableEntry(root_accessor, root),)) + while frontier: + # path is (value, attr_name) tuples. attr_name is attr of parent to get value + path = frontier.popleft() + current = path[-1].value + + yield path + + new_entries = [] + for subtable_entry in current.iterSubTables(): + new_entries.append(path + (subtable_entry,)) + + add_to_frontier_fn(frontier, new_entries) + + +def reorderGlyphs(font: ttLib.TTFont, new_glyph_order: List[str]): + old_glyph_order = font.getGlyphOrder() + if len(new_glyph_order) != len(old_glyph_order): + raise ValueError( + f"New glyph order contains {len(new_glyph_order)} glyphs, " + f"but font has {len(old_glyph_order)} glyphs" + ) + + if set(old_glyph_order) != set(new_glyph_order): + raise ValueError( + "New glyph order does not contain the same set of glyphs as the font:\n" + f"* only in new: {set(new_glyph_order) - set(old_glyph_order)}\n" + f"* only in old: {set(old_glyph_order) - set(new_glyph_order)}" + ) + + # Changing the order of glyphs in a TTFont requires that all tables that use + # glyph indexes have been fully. + # Cf. 
https://github.com/fonttools/fonttools/issues/2060 + font.ensureDecompiled() + not_loaded = sorted(t for t in font.keys() if not font.isLoaded(t)) + if not_loaded: + raise ValueError(f"Everything should be loaded, following aren't: {not_loaded}") + + font.setGlyphOrder(new_glyph_order) + + coverage_containers = {"GDEF", "GPOS", "GSUB", "MATH"} + for tag in coverage_containers: + if tag in font.keys(): + for path in _bfs_base_table(font[tag].table, f'font["{tag}"]'): + value = path[-1].value + reorder_key = (type(value), getattr(value, "Format", None)) + for reorder in _REORDER_RULES.get(reorder_key, []): + reorder.apply(font, value) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/sfnt.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/sfnt.py new file mode 100644 index 0000000000000000000000000000000000000000..6cc867a4d7c068a0cddc2cfbf1dd00cba74444e6 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/sfnt.py @@ -0,0 +1,662 @@ +"""ttLib/sfnt.py -- low-level module to deal with the sfnt file format. + +Defines two public classes: + +- SFNTReader +- SFNTWriter + +(Normally you don't have to use these classes explicitly; they are +used automatically by ttLib.TTFont.) + +The reading and writing of sfnt files is separated in two distinct +classes, since whenever the number of tables changes or whenever +a table's length changes you need to rewrite the whole file anyway. +""" + +from io import BytesIO +from types import SimpleNamespace +from fontTools.misc.textTools import Tag +from fontTools.misc import sstruct +from fontTools.ttLib import TTLibError, TTLibFileIsCollectionError +import struct +from collections import OrderedDict +import logging + + +log = logging.getLogger(__name__) + + +class SFNTReader(object): + def __new__(cls, *args, **kwargs): + """Return an instance of the SFNTReader sub-class which is compatible + with the input file type. 
+ """ + if args and cls is SFNTReader: + infile = args[0] + infile.seek(0) + sfntVersion = Tag(infile.read(4)) + infile.seek(0) + if sfntVersion == "wOF2": + # return new WOFF2Reader object + from fontTools.ttLib.woff2 import WOFF2Reader + + return object.__new__(WOFF2Reader) + # return default object + return object.__new__(cls) + + def __init__(self, file, checkChecksums=0, fontNumber=-1): + self.file = file + self.checkChecksums = checkChecksums + + self.flavor = None + self.flavorData = None + self.DirectoryEntry = SFNTDirectoryEntry + self.file.seek(0) + self.sfntVersion = self.file.read(4) + self.file.seek(0) + if self.sfntVersion == b"ttcf": + header = readTTCHeader(self.file) + numFonts = header.numFonts + if not 0 <= fontNumber < numFonts: + raise TTLibFileIsCollectionError( + "specify a font number between 0 and %d (inclusive)" + % (numFonts - 1) + ) + self.numFonts = numFonts + self.file.seek(header.offsetTable[fontNumber]) + data = self.file.read(sfntDirectorySize) + if len(data) != sfntDirectorySize: + raise TTLibError("Not a Font Collection (not enough data)") + sstruct.unpack(sfntDirectoryFormat, data, self) + elif self.sfntVersion == b"wOFF": + self.flavor = "woff" + self.DirectoryEntry = WOFFDirectoryEntry + data = self.file.read(woffDirectorySize) + if len(data) != woffDirectorySize: + raise TTLibError("Not a WOFF font (not enough data)") + sstruct.unpack(woffDirectoryFormat, data, self) + else: + data = self.file.read(sfntDirectorySize) + if len(data) != sfntDirectorySize: + raise TTLibError("Not a TrueType or OpenType font (not enough data)") + sstruct.unpack(sfntDirectoryFormat, data, self) + self.sfntVersion = Tag(self.sfntVersion) + + if self.sfntVersion not in ("\x00\x01\x00\x00", "OTTO", "true"): + raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)") + tables = {} + for i in range(self.numTables): + entry = self.DirectoryEntry() + entry.fromFile(self.file) + tag = Tag(entry.tag) + tables[tag] = entry + self.tables = 
OrderedDict(sorted(tables.items(), key=lambda i: i[1].offset)) + + # Load flavor data if any + if self.flavor == "woff": + self.flavorData = WOFFFlavorData(self) + + def has_key(self, tag): + return tag in self.tables + + __contains__ = has_key + + def keys(self): + return self.tables.keys() + + def __getitem__(self, tag): + """Fetch the raw table data.""" + entry = self.tables[Tag(tag)] + data = entry.loadData(self.file) + if self.checkChecksums: + if tag == "head": + # Beh: we have to special-case the 'head' table. + checksum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:]) + else: + checksum = calcChecksum(data) + if self.checkChecksums > 1: + # Be obnoxious, and barf when it's wrong + assert checksum == entry.checkSum, "bad checksum for '%s' table" % tag + elif checksum != entry.checkSum: + # Be friendly, and just log a warning. + log.warning("bad checksum for '%s' table", tag) + return data + + def __delitem__(self, tag): + del self.tables[Tag(tag)] + + def close(self): + self.file.close() + + # We define custom __getstate__ and __setstate__ to make SFNTReader pickle-able + # and deepcopy-able. When a TTFont is loaded as lazy=True, SFNTReader holds a + # reference to an external file object which is not pickleable. So in __getstate__ + # we store the file name and current position, and in __setstate__ we reopen the + # same named file after unpickling. 
+ + def __getstate__(self): + if isinstance(self.file, BytesIO): + # BytesIO is already pickleable, return the state unmodified + return self.__dict__ + + # remove unpickleable file attribute, and only store its name and pos + state = self.__dict__.copy() + del state["file"] + state["_filename"] = self.file.name + state["_filepos"] = self.file.tell() + return state + + def __setstate__(self, state): + if "file" not in state: + self.file = open(state.pop("_filename"), "rb") + self.file.seek(state.pop("_filepos")) + self.__dict__.update(state) + + +# default compression level for WOFF 1.0 tables and metadata +ZLIB_COMPRESSION_LEVEL = 6 + +# if set to True, use zopfli instead of zlib for compressing WOFF 1.0. +# The Python bindings are available at https://pypi.python.org/pypi/zopfli +USE_ZOPFLI = False + +# mapping between zlib's compression levels and zopfli's 'numiterations'. +# Use lower values for files over several MB in size or it will be too slow +ZOPFLI_LEVELS = { + # 0: 0, # can't do 0 iterations... + 1: 1, + 2: 3, + 3: 5, + 4: 8, + 5: 10, + 6: 15, + 7: 25, + 8: 50, + 9: 100, +} + + +def compress(data, level=ZLIB_COMPRESSION_LEVEL): + """Compress 'data' to Zlib format. If 'USE_ZOPFLI' variable is True, + zopfli is used instead of the zlib module. + The compression 'level' must be between 0 and 9. 1 gives best speed, + 9 gives best compression (0 gives no compression at all). + The default value is a compromise between speed and compression (6). + """ + if not (0 <= level <= 9): + raise ValueError("Bad compression level: %s" % level) + if not USE_ZOPFLI or level == 0: + from zlib import compress + + return compress(data, level) + else: + from zopfli.zlib import compress + + return compress(data, numiterations=ZOPFLI_LEVELS[level]) + + +class SFNTWriter(object): + def __new__(cls, *args, **kwargs): + """Return an instance of the SFNTWriter sub-class which is compatible + with the specified 'flavor'. 
+ """ + flavor = None + if kwargs and "flavor" in kwargs: + flavor = kwargs["flavor"] + elif args and len(args) > 3: + flavor = args[3] + if cls is SFNTWriter: + if flavor == "woff2": + # return new WOFF2Writer object + from fontTools.ttLib.woff2 import WOFF2Writer + + return object.__new__(WOFF2Writer) + # return default object + return object.__new__(cls) + + def __init__( + self, + file, + numTables, + sfntVersion="\000\001\000\000", + flavor=None, + flavorData=None, + ): + self.file = file + self.numTables = numTables + self.sfntVersion = Tag(sfntVersion) + self.flavor = flavor + self.flavorData = flavorData + + if self.flavor == "woff": + self.directoryFormat = woffDirectoryFormat + self.directorySize = woffDirectorySize + self.DirectoryEntry = WOFFDirectoryEntry + + self.signature = "wOFF" + + # to calculate WOFF checksum adjustment, we also need the original SFNT offsets + self.origNextTableOffset = ( + sfntDirectorySize + numTables * sfntDirectoryEntrySize + ) + else: + assert not self.flavor, "Unknown flavor '%s'" % self.flavor + self.directoryFormat = sfntDirectoryFormat + self.directorySize = sfntDirectorySize + self.DirectoryEntry = SFNTDirectoryEntry + + from fontTools.ttLib import getSearchRange + + self.searchRange, self.entrySelector, self.rangeShift = getSearchRange( + numTables, 16 + ) + + self.directoryOffset = self.file.tell() + self.nextTableOffset = ( + self.directoryOffset + + self.directorySize + + numTables * self.DirectoryEntry.formatSize + ) + # clear out directory area + self.file.seek(self.nextTableOffset) + # make sure we're actually where we want to be. 
(old cStringIO bug) + self.file.write(b"\0" * (self.nextTableOffset - self.file.tell())) + self.tables = OrderedDict() + + def setEntry(self, tag, entry): + if tag in self.tables: + raise TTLibError("cannot rewrite '%s' table" % tag) + + self.tables[tag] = entry + + def __setitem__(self, tag, data): + """Write raw table data to disk.""" + if tag in self.tables: + raise TTLibError("cannot rewrite '%s' table" % tag) + + entry = self.DirectoryEntry() + entry.tag = tag + entry.offset = self.nextTableOffset + if tag == "head": + entry.checkSum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:]) + self.headTable = data + entry.uncompressed = True + else: + entry.checkSum = calcChecksum(data) + entry.saveData(self.file, data) + + if self.flavor == "woff": + entry.origOffset = self.origNextTableOffset + self.origNextTableOffset += (entry.origLength + 3) & ~3 + + self.nextTableOffset = self.nextTableOffset + ((entry.length + 3) & ~3) + # Add NUL bytes to pad the table data to a 4-byte boundary. + # Don't depend on f.seek() as we need to add the padding even if no + # subsequent write follows (seek is lazy), ie. after the final table + # in the font. + self.file.write(b"\0" * (self.nextTableOffset - self.file.tell())) + assert self.nextTableOffset == self.file.tell() + + self.setEntry(tag, entry) + + def __getitem__(self, tag): + return self.tables[tag] + + def close(self): + """All tables must have been written to disk. Now write the + directory. 
+ """ + tables = sorted(self.tables.items()) + if len(tables) != self.numTables: + raise TTLibError( + "wrong number of tables; expected %d, found %d" + % (self.numTables, len(tables)) + ) + + if self.flavor == "woff": + self.signature = b"wOFF" + self.reserved = 0 + + self.totalSfntSize = 12 + self.totalSfntSize += 16 * len(tables) + for tag, entry in tables: + self.totalSfntSize += (entry.origLength + 3) & ~3 + + data = self.flavorData if self.flavorData else WOFFFlavorData() + if data.majorVersion is not None and data.minorVersion is not None: + self.majorVersion = data.majorVersion + self.minorVersion = data.minorVersion + else: + if hasattr(self, "headTable"): + self.majorVersion, self.minorVersion = struct.unpack( + ">HH", self.headTable[4:8] + ) + else: + self.majorVersion = self.minorVersion = 0 + if data.metaData: + self.metaOrigLength = len(data.metaData) + self.file.seek(0, 2) + self.metaOffset = self.file.tell() + compressedMetaData = compress(data.metaData) + self.metaLength = len(compressedMetaData) + self.file.write(compressedMetaData) + else: + self.metaOffset = self.metaLength = self.metaOrigLength = 0 + if data.privData: + self.file.seek(0, 2) + off = self.file.tell() + paddedOff = (off + 3) & ~3 + self.file.write(b"\0" * (paddedOff - off)) + self.privOffset = self.file.tell() + self.privLength = len(data.privData) + self.file.write(data.privData) + else: + self.privOffset = self.privLength = 0 + + self.file.seek(0, 2) + self.length = self.file.tell() + + else: + assert not self.flavor, "Unknown flavor '%s'" % self.flavor + pass + + directory = sstruct.pack(self.directoryFormat, self) + + self.file.seek(self.directoryOffset + self.directorySize) + seenHead = 0 + for tag, entry in tables: + if tag == "head": + seenHead = 1 + directory = directory + entry.toString() + if seenHead: + self.writeMasterChecksum(directory) + self.file.seek(self.directoryOffset) + self.file.write(directory) + + def _calcMasterChecksum(self, directory): + # calculate 
checkSumAdjustment + tags = list(self.tables.keys()) + checksums = [] + for i in range(len(tags)): + checksums.append(self.tables[tags[i]].checkSum) + + if self.DirectoryEntry != SFNTDirectoryEntry: + # Create a SFNT directory for checksum calculation purposes + from fontTools.ttLib import getSearchRange + + self.searchRange, self.entrySelector, self.rangeShift = getSearchRange( + self.numTables, 16 + ) + directory = sstruct.pack(sfntDirectoryFormat, self) + tables = sorted(self.tables.items()) + for tag, entry in tables: + sfntEntry = SFNTDirectoryEntry() + sfntEntry.tag = entry.tag + sfntEntry.checkSum = entry.checkSum + sfntEntry.offset = entry.origOffset + sfntEntry.length = entry.origLength + directory = directory + sfntEntry.toString() + + directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize + assert directory_end == len(directory) + + checksums.append(calcChecksum(directory)) + checksum = sum(checksums) & 0xFFFFFFFF + # BiboAfba! + checksumadjustment = (0xB1B0AFBA - checksum) & 0xFFFFFFFF + return checksumadjustment + + def writeMasterChecksum(self, directory): + checksumadjustment = self._calcMasterChecksum(directory) + # write the checksum to the file + self.file.seek(self.tables["head"].offset + 8) + self.file.write(struct.pack(">L", checksumadjustment)) + + def reordersTables(self): + return False + + +# -- sfnt directory helpers and cruft + +ttcHeaderFormat = """ + > # big endian + TTCTag: 4s # "ttcf" + Version: L # 0x00010000 or 0x00020000 + numFonts: L # number of fonts + # OffsetTable[numFonts]: L # array with offsets from beginning of file + # ulDsigTag: L # version 2.0 only + # ulDsigLength: L # version 2.0 only + # ulDsigOffset: L # version 2.0 only +""" + +ttcHeaderSize = sstruct.calcsize(ttcHeaderFormat) + +sfntDirectoryFormat = """ + > # big endian + sfntVersion: 4s + numTables: H # number of tables + searchRange: H # (max2 <= numTables)*16 + entrySelector: H # log2(max2 <= numTables) + rangeShift: H # 
numTables*16-searchRange +""" + +sfntDirectorySize = sstruct.calcsize(sfntDirectoryFormat) + +sfntDirectoryEntryFormat = """ + > # big endian + tag: 4s + checkSum: L + offset: L + length: L +""" + +sfntDirectoryEntrySize = sstruct.calcsize(sfntDirectoryEntryFormat) + +woffDirectoryFormat = """ + > # big endian + signature: 4s # "wOFF" + sfntVersion: 4s + length: L # total woff file size + numTables: H # number of tables + reserved: H # set to 0 + totalSfntSize: L # uncompressed size + majorVersion: H # major version of WOFF file + minorVersion: H # minor version of WOFF file + metaOffset: L # offset to metadata block + metaLength: L # length of compressed metadata + metaOrigLength: L # length of uncompressed metadata + privOffset: L # offset to private data block + privLength: L # length of private data block +""" + +woffDirectorySize = sstruct.calcsize(woffDirectoryFormat) + +woffDirectoryEntryFormat = """ + > # big endian + tag: 4s + offset: L + length: L # compressed length + origLength: L # original length + checkSum: L # original checksum +""" + +woffDirectoryEntrySize = sstruct.calcsize(woffDirectoryEntryFormat) + + +class DirectoryEntry(object): + def __init__(self): + self.uncompressed = False # if True, always embed entry raw + + def fromFile(self, file): + sstruct.unpack(self.format, file.read(self.formatSize), self) + + def fromString(self, str): + sstruct.unpack(self.format, str, self) + + def toString(self): + return sstruct.pack(self.format, self) + + def __repr__(self): + if hasattr(self, "tag"): + return "<%s '%s' at %x>" % (self.__class__.__name__, self.tag, id(self)) + else: + return "<%s at %x>" % (self.__class__.__name__, id(self)) + + def loadData(self, file): + file.seek(self.offset) + data = file.read(self.length) + assert len(data) == self.length + if hasattr(self.__class__, "decodeData"): + data = self.decodeData(data) + return data + + def saveData(self, file, data): + if hasattr(self.__class__, "encodeData"): + data = 
self.encodeData(data) + self.length = len(data) + file.seek(self.offset) + file.write(data) + + def decodeData(self, rawData): + return rawData + + def encodeData(self, data): + return data + + +class SFNTDirectoryEntry(DirectoryEntry): + format = sfntDirectoryEntryFormat + formatSize = sfntDirectoryEntrySize + + +class WOFFDirectoryEntry(DirectoryEntry): + format = woffDirectoryEntryFormat + formatSize = woffDirectoryEntrySize + + def __init__(self): + super(WOFFDirectoryEntry, self).__init__() + # With fonttools<=3.1.2, the only way to set a different zlib + # compression level for WOFF directory entries was to set the class + # attribute 'zlibCompressionLevel'. This is now replaced by a globally + # defined `ZLIB_COMPRESSION_LEVEL`, which is also applied when + # compressing the metadata. For backward compatibility, we still + # use the class attribute if it was already set. + if not hasattr(WOFFDirectoryEntry, "zlibCompressionLevel"): + self.zlibCompressionLevel = ZLIB_COMPRESSION_LEVEL + + def decodeData(self, rawData): + import zlib + + if self.length == self.origLength: + data = rawData + else: + assert self.length < self.origLength + data = zlib.decompress(rawData) + assert len(data) == self.origLength + return data + + def encodeData(self, data): + self.origLength = len(data) + if not self.uncompressed: + compressedData = compress(data, self.zlibCompressionLevel) + if self.uncompressed or len(compressedData) >= self.origLength: + # Encode uncompressed + rawData = data + self.length = self.origLength + else: + rawData = compressedData + self.length = len(rawData) + return rawData + + +class WOFFFlavorData: + Flavor = "woff" + + def __init__(self, reader=None): + self.majorVersion = None + self.minorVersion = None + self.metaData = None + self.privData = None + if reader: + self.majorVersion = reader.majorVersion + self.minorVersion = reader.minorVersion + if reader.metaLength: + reader.file.seek(reader.metaOffset) + rawData = 
reader.file.read(reader.metaLength) + assert len(rawData) == reader.metaLength + data = self._decompress(rawData) + assert len(data) == reader.metaOrigLength + self.metaData = data + if reader.privLength: + reader.file.seek(reader.privOffset) + data = reader.file.read(reader.privLength) + assert len(data) == reader.privLength + self.privData = data + + def _decompress(self, rawData): + import zlib + + return zlib.decompress(rawData) + + +def calcChecksum(data): + """Calculate the checksum for an arbitrary block of data. + + If the data length is not a multiple of four, it assumes + it is to be padded with null byte. + + >>> print(calcChecksum(b"abcd")) + 1633837924 + >>> print(calcChecksum(b"abcdxyz")) + 3655064932 + """ + remainder = len(data) % 4 + if remainder: + data += b"\0" * (4 - remainder) + value = 0 + blockSize = 4096 + assert blockSize % 4 == 0 + for i in range(0, len(data), blockSize): + block = data[i : i + blockSize] + longs = struct.unpack(">%dL" % (len(block) // 4), block) + value = (value + sum(longs)) & 0xFFFFFFFF + return value + + +def readTTCHeader(file): + file.seek(0) + data = file.read(ttcHeaderSize) + if len(data) != ttcHeaderSize: + raise TTLibError("Not a Font Collection (not enough data)") + self = SimpleNamespace() + sstruct.unpack(ttcHeaderFormat, data, self) + if self.TTCTag != "ttcf": + raise TTLibError("Not a Font Collection") + assert self.Version == 0x00010000 or self.Version == 0x00020000, ( + "unrecognized TTC version 0x%08x" % self.Version + ) + self.offsetTable = struct.unpack( + ">%dL" % self.numFonts, file.read(self.numFonts * 4) + ) + if self.Version == 0x00020000: + pass # ignoring version 2.0 signatures + return self + + +def writeTTCHeader(file, numFonts): + self = SimpleNamespace() + self.TTCTag = "ttcf" + self.Version = 0x00010000 + self.numFonts = numFonts + file.seek(0) + file.write(sstruct.pack(ttcHeaderFormat, self)) + offset = file.tell() + file.write(struct.pack(">%dL" % self.numFonts, *([0] * self.numFonts))) + 
return offset + + +if __name__ == "__main__": + import sys + import doctest + + sys.exit(doctest.testmod().failed) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/standardGlyphOrder.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/standardGlyphOrder.py new file mode 100644 index 0000000000000000000000000000000000000000..4062385240096ac822814aebb8bf7c59cf003a8f --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/standardGlyphOrder.py @@ -0,0 +1,271 @@ +# +# 'post' table formats 1.0 and 2.0 rely on this list of "standard" +# glyphs. +# +# My list is correct according to the Apple documentation for the 'post' table: +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6post.html +# (However, it seems that TTFdump (from MS) and FontLab disagree, at +# least with respect to the last glyph, which they list as 'dslash' +# instead of 'dcroat'.) +# + +standardGlyphOrder = [ + ".notdef", # 0 + ".null", # 1 + "nonmarkingreturn", # 2 + "space", # 3 + "exclam", # 4 + "quotedbl", # 5 + "numbersign", # 6 + "dollar", # 7 + "percent", # 8 + "ampersand", # 9 + "quotesingle", # 10 + "parenleft", # 11 + "parenright", # 12 + "asterisk", # 13 + "plus", # 14 + "comma", # 15 + "hyphen", # 16 + "period", # 17 + "slash", # 18 + "zero", # 19 + "one", # 20 + "two", # 21 + "three", # 22 + "four", # 23 + "five", # 24 + "six", # 25 + "seven", # 26 + "eight", # 27 + "nine", # 28 + "colon", # 29 + "semicolon", # 30 + "less", # 31 + "equal", # 32 + "greater", # 33 + "question", # 34 + "at", # 35 + "A", # 36 + "B", # 37 + "C", # 38 + "D", # 39 + "E", # 40 + "F", # 41 + "G", # 42 + "H", # 43 + "I", # 44 + "J", # 45 + "K", # 46 + "L", # 47 + "M", # 48 + "N", # 49 + "O", # 50 + "P", # 51 + "Q", # 52 + "R", # 53 + "S", # 54 + "T", # 55 + "U", # 56 + "V", # 57 + "W", # 58 + "X", # 59 + "Y", # 60 + "Z", # 61 + "bracketleft", # 62 + "backslash", # 63 + "bracketright", # 64 + "asciicircum", # 65 + "underscore", # 66 + 
"grave", # 67 + "a", # 68 + "b", # 69 + "c", # 70 + "d", # 71 + "e", # 72 + "f", # 73 + "g", # 74 + "h", # 75 + "i", # 76 + "j", # 77 + "k", # 78 + "l", # 79 + "m", # 80 + "n", # 81 + "o", # 82 + "p", # 83 + "q", # 84 + "r", # 85 + "s", # 86 + "t", # 87 + "u", # 88 + "v", # 89 + "w", # 90 + "x", # 91 + "y", # 92 + "z", # 93 + "braceleft", # 94 + "bar", # 95 + "braceright", # 96 + "asciitilde", # 97 + "Adieresis", # 98 + "Aring", # 99 + "Ccedilla", # 100 + "Eacute", # 101 + "Ntilde", # 102 + "Odieresis", # 103 + "Udieresis", # 104 + "aacute", # 105 + "agrave", # 106 + "acircumflex", # 107 + "adieresis", # 108 + "atilde", # 109 + "aring", # 110 + "ccedilla", # 111 + "eacute", # 112 + "egrave", # 113 + "ecircumflex", # 114 + "edieresis", # 115 + "iacute", # 116 + "igrave", # 117 + "icircumflex", # 118 + "idieresis", # 119 + "ntilde", # 120 + "oacute", # 121 + "ograve", # 122 + "ocircumflex", # 123 + "odieresis", # 124 + "otilde", # 125 + "uacute", # 126 + "ugrave", # 127 + "ucircumflex", # 128 + "udieresis", # 129 + "dagger", # 130 + "degree", # 131 + "cent", # 132 + "sterling", # 133 + "section", # 134 + "bullet", # 135 + "paragraph", # 136 + "germandbls", # 137 + "registered", # 138 + "copyright", # 139 + "trademark", # 140 + "acute", # 141 + "dieresis", # 142 + "notequal", # 143 + "AE", # 144 + "Oslash", # 145 + "infinity", # 146 + "plusminus", # 147 + "lessequal", # 148 + "greaterequal", # 149 + "yen", # 150 + "mu", # 151 + "partialdiff", # 152 + "summation", # 153 + "product", # 154 + "pi", # 155 + "integral", # 156 + "ordfeminine", # 157 + "ordmasculine", # 158 + "Omega", # 159 + "ae", # 160 + "oslash", # 161 + "questiondown", # 162 + "exclamdown", # 163 + "logicalnot", # 164 + "radical", # 165 + "florin", # 166 + "approxequal", # 167 + "Delta", # 168 + "guillemotleft", # 169 + "guillemotright", # 170 + "ellipsis", # 171 + "nonbreakingspace", # 172 + "Agrave", # 173 + "Atilde", # 174 + "Otilde", # 175 + "OE", # 176 + "oe", # 177 + "endash", # 178 + "emdash", # 
179 + "quotedblleft", # 180 + "quotedblright", # 181 + "quoteleft", # 182 + "quoteright", # 183 + "divide", # 184 + "lozenge", # 185 + "ydieresis", # 186 + "Ydieresis", # 187 + "fraction", # 188 + "currency", # 189 + "guilsinglleft", # 190 + "guilsinglright", # 191 + "fi", # 192 + "fl", # 193 + "daggerdbl", # 194 + "periodcentered", # 195 + "quotesinglbase", # 196 + "quotedblbase", # 197 + "perthousand", # 198 + "Acircumflex", # 199 + "Ecircumflex", # 200 + "Aacute", # 201 + "Edieresis", # 202 + "Egrave", # 203 + "Iacute", # 204 + "Icircumflex", # 205 + "Idieresis", # 206 + "Igrave", # 207 + "Oacute", # 208 + "Ocircumflex", # 209 + "apple", # 210 + "Ograve", # 211 + "Uacute", # 212 + "Ucircumflex", # 213 + "Ugrave", # 214 + "dotlessi", # 215 + "circumflex", # 216 + "tilde", # 217 + "macron", # 218 + "breve", # 219 + "dotaccent", # 220 + "ring", # 221 + "cedilla", # 222 + "hungarumlaut", # 223 + "ogonek", # 224 + "caron", # 225 + "Lslash", # 226 + "lslash", # 227 + "Scaron", # 228 + "scaron", # 229 + "Zcaron", # 230 + "zcaron", # 231 + "brokenbar", # 232 + "Eth", # 233 + "eth", # 234 + "Yacute", # 235 + "yacute", # 236 + "Thorn", # 237 + "thorn", # 238 + "minus", # 239 + "multiply", # 240 + "onesuperior", # 241 + "twosuperior", # 242 + "threesuperior", # 243 + "onehalf", # 244 + "onequarter", # 245 + "threequarters", # 246 + "franc", # 247 + "Gbreve", # 248 + "gbreve", # 249 + "Idotaccent", # 250 + "Scedilla", # 251 + "scedilla", # 252 + "Cacute", # 253 + "cacute", # 254 + "Ccaron", # 255 + "ccaron", # 256 + "dcroat", # 257 +] diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/BitmapGlyphMetrics.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/BitmapGlyphMetrics.py new file mode 100644 index 0000000000000000000000000000000000000000..10b4f828213b8320d54eefed3d5e66f2ba532101 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/BitmapGlyphMetrics.py @@ -0,0 +1,64 @@ +# Since bitmap glyph 
# metrics are shared between EBLC and EBDT
# this class gets its own python file.
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
import logging


log = logging.getLogger(__name__)

bigGlyphMetricsFormat = """
  > # big endian
  height:       B
  width:        B
  horiBearingX: b
  horiBearingY: b
  horiAdvance:  B
  vertBearingX: b
  vertBearingY: b
  vertAdvance:  B
"""

smallGlyphMetricsFormat = """
  > # big endian
  height:   B
  width:    B
  BearingX: b
  BearingY: b
  Advance:  B
"""


class BitmapGlyphMetrics(object):
    """Shared XML (de)serialization for big/small bitmap glyph metrics."""

    def toXML(self, writer, ttFont):
        className = self.__class__.__name__
        writer.begintag(className)
        writer.newline()
        # Emit one simpletag per field declared in the binary format.
        for metricName in sstruct.getformat(self.__class__.binaryFormat)[1]:
            writer.simpletag(metricName, value=getattr(self, metricName))
            writer.newline()
        writer.endtag(className)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        knownMetrics = set(sstruct.getformat(self.__class__.binaryFormat)[1])
        for element in content:
            if not isinstance(element, tuple):
                continue
            elemName, elemAttrs, _ = element
            # Only accept metrics declared in the binary format; anything
            # else is reported and dropped.
            if elemName in knownMetrics:
                setattr(self, elemName, safeEval(elemAttrs["value"]))
            else:
                log.warning(
                    "unknown name '%s' being ignored in %s.",
                    elemName,
                    self.__class__.__name__,
                )


class BigGlyphMetrics(BitmapGlyphMetrics):
    binaryFormat = bigGlyphMetricsFormat


class SmallGlyphMetrics(BitmapGlyphMetrics):
    binaryFormat = smallGlyphMetricsFormat


# --- fontTools/ttLib/tables/C_B_L_C_.py ---
# Copyright 2013 Google, Inc. All Rights Reserved.
# Google Author(s): Matt Fontaine

from . import E_B_L_C_


class table_C_B_L_C_(E_B_L_C_.table_E_B_L_C_):
    # Same layout as EBLC; the bitmap data itself lives in CBDT.
    dependencies = ["CBDT"]


# --- fontTools/ttLib/tables/C_F_F_.py ---
from io import BytesIO
from fontTools import cffLib
from . import DefaultTable


class table_C_F_F_(DefaultTable.DefaultTable):
    """The 'CFF ' table wrapper around a single-font cffLib.CFFFontSet."""

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.cff = cffLib.CFFFontSet()
        self._gaveGlyphOrder = False

    def decompile(self, data, otFont):
        self.cff.decompile(BytesIO(data), otFont, isCFF2=False)
        assert len(self.cff) == 1, "can't deal with multi-font CFF tables."

    def compile(self, otFont):
        buf = BytesIO()
        self.cff.compile(buf, otFont, isCFF2=False)
        return buf.getvalue()

    def haveGlyphNames(self):
        # CID-keyed fonts (those carrying a ROS) have no glyph names.
        return not hasattr(self.cff[self.cff.fontNames[0]], "ROS")

    def getGlyphOrder(self):
        # The glyph order may only be handed out once; the CFF data is
        # authoritative and must not be re-read after the font takes it over.
        if self._gaveGlyphOrder:
            from fontTools import ttLib

            raise ttLib.TTLibError("illegal use of getGlyphOrder()")
        self._gaveGlyphOrder = True
        return self.cff[self.cff.fontNames[0]].getGlyphOrder()

    def setGlyphOrder(self, glyphOrder):
        pass
        # XXX
        # self.cff[self.cff.fontNames[0]].setGlyphOrder(glyphOrder)

    def toXML(self, writer, otFont):
        self.cff.toXML(writer)

    def fromXML(self, name, attrs, content, otFont):
        if not hasattr(self, "cff"):
            self.cff = cffLib.CFFFontSet()
        self.cff.fromXML(name, attrs, content, otFont)


# --- fontTools/ttLib/tables/D_S_I_G_.py ---
from fontTools.misc.textTools import bytesjoin, strjoin, tobytes, tostr, safeEval
from fontTools.misc import sstruct
from . import DefaultTable
import base64

DSIG_HeaderFormat = """
	> # big endian
	ulVersion: L
	usNumSigs: H
	usFlag:    H
"""
# followed by an array of usNumSigs DSIG_Signature records
DSIG_SignatureFormat = """
	> # big endian
	ulFormat: L
	ulLength: L # length includes DSIG_SignatureBlock header
	ulOffset: L
"""
# followed by an array of usNumSigs DSIG_SignatureBlock records,
# each followed immediately by the pkcs7 bytes
DSIG_SignatureBlockFormat = """
	> # big endian
	usReserved1: H
	usReserved2: H
	cbSignature: l # length of following raw pkcs7 data
"""

#
# NOTE
# the DSIG table format allows for SignatureBlocks residing
# anywhere in the table and possibly in a different order as
# listed in the array after the first table header
#
# this implementation does not keep track of any gaps and/or data
# before or after the actual signature blocks while decompiling,
# and puts them in the same physical order as listed in the header
# on compilation with no padding whatsoever.
#


class table_D_S_I_G_(DefaultTable.DefaultTable):
    def decompile(self, data, ttFont):
        dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self)
        assert self.ulVersion == 1, "DSIG ulVersion must be 1"
        assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0"
        self.signatureRecords = sigrecs = []
        for n in range(self.usNumSigs):
            sigrec, newData = sstruct.unpack2(
                DSIG_SignatureFormat, newData, SignatureRecord()
            )
            assert sigrec.ulFormat == 1, (
                "DSIG signature record #%d ulFormat must be 1" % n
            )
            sigrecs.append(sigrec)
        # BUG FIX: this loop previously reused the stale index `n` left over
        # from the loop above, so every assert message here reported the
        # last record's number instead of the failing one.
        for n, sigrec in enumerate(sigrecs):
            dummy, newData = sstruct.unpack2(
                DSIG_SignatureBlockFormat, data[sigrec.ulOffset :], sigrec
            )
            assert sigrec.usReserved1 == 0, (
                "DSIG signature record #%d usReserved1 must be 0" % n
            )
            assert sigrec.usReserved2 == 0, (
                "DSIG signature record #%d usReserved2 must be 0" % n
            )
            sigrec.pkcs7 = newData[: sigrec.cbSignature]

    def compile(self, ttFont):
        packed = sstruct.pack(DSIG_HeaderFormat, self)
        headers = [packed]
        # The signature blocks start after the header and the record array.
        offset = len(packed) + self.usNumSigs * sstruct.calcsize(DSIG_SignatureFormat)
        data = []
        for sigrec in self.signatureRecords:
            # first pack signature block
            sigrec.cbSignature = len(sigrec.pkcs7)
            packed = sstruct.pack(DSIG_SignatureBlockFormat, sigrec) + sigrec.pkcs7
            data.append(packed)
            # update redundant length field
            sigrec.ulLength = len(packed)
            # update running table offset
            sigrec.ulOffset = offset
            headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec))
            offset += sigrec.ulLength
        if offset % 2:
            # Pad to even bytes
            data.append(b"\0")
        return bytesjoin(headers + data)

    def toXML(self, xmlWriter, ttFont):
        xmlWriter.comment(
            "note that the Digital Signature will be invalid after recompilation!"
        )
        xmlWriter.newline()
        xmlWriter.simpletag(
            "tableHeader",
            version=self.ulVersion,
            numSigs=self.usNumSigs,
            flag="0x%X" % self.usFlag,
        )
        for sigrec in self.signatureRecords:
            xmlWriter.newline()
            sigrec.toXML(xmlWriter, ttFont)
        xmlWriter.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "tableHeader":
            self.signatureRecords = []
            self.ulVersion = safeEval(attrs["version"])
            self.usNumSigs = safeEval(attrs["numSigs"])
            self.usFlag = safeEval(attrs["flag"])
            return
        if name == "SignatureRecord":
            sigrec = SignatureRecord()
            sigrec.fromXML(name, attrs, content, ttFont)
            self.signatureRecords.append(sigrec)


# Predicate used to strip the PEM armour lines and blanks out of the
# XML content before base64-decoding.
pem_spam = lambda l, spam={
    "-----BEGIN PKCS7-----": True,
    "-----END PKCS7-----": True,
    "": True,
}: not spam.get(l.strip())


def b64encode(b):
    s = base64.b64encode(b)
    # Line-break at 76 chars.
    items = []
    while s:
        items.append(tostr(s[:76]))
        items.append("\n")
        s = s[76:]
    return strjoin(items)


class SignatureRecord(object):
    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.__dict__)

    def toXML(self, writer, ttFont):
        writer.begintag(self.__class__.__name__, format=self.ulFormat)
        writer.newline()
        writer.write_noindent("-----BEGIN PKCS7-----\n")
        writer.write_noindent(b64encode(self.pkcs7))
        writer.write_noindent("-----END PKCS7-----\n")
        writer.endtag(self.__class__.__name__)

    def fromXML(self, name, attrs, content, ttFont):
        self.ulFormat = safeEval(attrs["format"])
        self.usReserved1 = safeEval(attrs.get("reserved1", "0"))
        self.usReserved2 = safeEval(attrs.get("reserved2", "0"))
        self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content))))


# --- fontTools/ttLib/tables/F__e_a_t.py ---
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr
from fontTools.misc.textTools import safeEval
from . import DefaultTable
from . import grUtils
import struct

Feat_hdr_format = """
    >
    version:    16.16F
"""


class table_F__e_a_t(DefaultTable.DefaultTable):
    """The ``Feat`` table is used exclusively by the Graphite shaping engine
    to store features and possible settings specified in GDL. Graphite features
    determine what rules are applied to transform a glyph stream.

    Not to be confused with ``feat``, or the OpenType Layout tables
    ``GSUB``/``GPOS``."""

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.features = {}

    def decompile(self, data, ttFont):
        (_, data) = sstruct.unpack2(Feat_hdr_format, data, self)
        self.version = float(floatToFixedToStr(self.version, precisionBits=16))
        (numFeats,) = struct.unpack(">H", data[:2])
        # Skip the rest of the header (count + reserved fields).
        data = data[8:]
        allfeats = []
        maxsetting = 0
        for i in range(numFeats):
            # v2.0 records are 16 bytes with a 32-bit feature id; v1.x
            # records are 12 bytes with a 16-bit id.
            if self.version >= 2.0:
                (fid, nums, _, offset, flags, lid) = struct.unpack(
                    ">LHHLHH", data[16 * i : 16 * (i + 1)]
                )
                offset = int((offset - 12 - 16 * numFeats) / 4)
            else:
                (fid, nums, offset, flags, lid) = struct.unpack(
                    ">HHLHH", data[12 * i : 12 * (i + 1)]
                )
                offset = int((offset - 12 - 12 * numFeats) / 4)
            allfeats.append((fid, nums, offset, flags, lid))
            maxsetting = max(maxsetting, offset + nums)
        # NOTE(review): this always skips 16 bytes per record even for
        # version < 2.0 tables, whose records are 12 bytes -- looks
        # suspicious; confirm against the Graphite Feat specification.
        data = data[16 * numFeats :]
        allsettings = []
        for i in range(maxsetting):
            if len(data) >= 4 * (i + 1):
                (val, lid) = struct.unpack(">HH", data[4 * i : 4 * (i + 1)])
                allsettings.append((val, lid))
        for i, f in enumerate(allfeats):
            (fid, nums, offset, flags, lid) = f
            fobj = Feature()
            fobj.flags = flags
            fobj.label = lid
            self.features[grUtils.num2tag(fid)] = fobj
            fobj.settings = {}
            fobj.default = None
            fobj.index = i
            # The first setting listed for a feature is its default.
            for settingIndex in range(offset, offset + nums):
                if settingIndex >= len(allsettings):
                    continue
                (vid, vlid) = allsettings[settingIndex]
                fobj.settings[vid] = vlid
                if fobj.default is None:
                    fobj.default = vid

    def compile(self, ttFont):
        fdat = b""
        vdat = b""
        offset = 0
        for f, v in sorted(self.features.items(), key=lambda x: x[1].index):
            fnum = grUtils.tag2num(f)
            if self.version >= 2.0:
                fdat += struct.pack(
                    ">LHHLHH",
                    grUtils.tag2num(f),
                    len(v.settings),
                    0,
                    offset * 4 + 12 + 16 * len(self.features),
                    v.flags,
                    v.label,
                )
            elif fnum > 65535:  # self healing for alphabetic ids
                # A v1.x record can't hold a 32-bit id: upgrade and redo.
                self.version = 2.0
                return self.compile(ttFont)
            else:
                fdat += struct.pack(
                    ">HHLHH",
                    grUtils.tag2num(f),
                    len(v.settings),
                    offset * 4 + 12 + 12 * len(self.features),
                    v.flags,
                    v.label,
                )
            # Settings sorted with the default first, the rest in natural order.
            for s, l in sorted(
                v.settings.items(), key=lambda x: (-1, x[1]) if x[0] == v.default else x
            ):
                vdat += struct.pack(">HH", s, l)
            offset += len(v.settings)
        hdr = sstruct.pack(Feat_hdr_format, self)
        return hdr + struct.pack(">HHL", len(self.features), 0, 0) + fdat + vdat

    def toXML(self, writer, ttFont):
        writer.simpletag("version", version=self.version)
        writer.newline()
        for f, v in sorted(self.features.items(), key=lambda x: x[1].index):
            writer.begintag(
                "feature",
                fid=f,
                label=v.label,
                flags=v.flags,
                default=(v.default if v.default else 0),
            )
            writer.newline()
            for s, l in sorted(v.settings.items()):
                writer.simpletag("setting", value=s, label=l)
                writer.newline()
            writer.endtag("feature")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":
            self.version = float(safeEval(attrs["version"]))
        elif name == "feature":
            fid = attrs["fid"]
            fobj = Feature()
            fobj.flags = int(safeEval(attrs["flags"]))
            fobj.label = int(safeEval(attrs["label"]))
            fobj.default = int(safeEval(attrs.get("default", "0")))
            fobj.index = len(self.features)
            self.features[fid] = fobj
            fobj.settings = {}
            for element in content:
                if not isinstance(element, tuple):
                    continue
                tag, a, c = element
                if tag == "setting":
                    fobj.settings[int(safeEval(a["value"]))] = int(safeEval(a["label"]))


class Feature(object):
    # Plain attribute bag: flags, label, default, index, settings.
    pass


# --- fontTools/ttLib/tables/G_M_A_P_.py ---
from fontTools.misc import sstruct
from fontTools.misc.textTools import tobytes, tostr, safeEval
from . import DefaultTable

GMAPFormat = """
    > # big endian
    tableVersionMajor:  H
    tableVersionMinor:  H
    flags:              H
    recordsCount:       H
    recordsOffset:      H
    fontNameLength:     H
"""
# psFontName is a byte string which follows the record above. This is zero
# padded to the beginning of the records array. The recordsOffset is 32 bit
# aligned.
GMAPRecordFormat1 = """
    > # big endian
    UV:   L
    cid:  H
    gid:  H
    ggid: H
    name: 32s
"""


class GMAPRecord(object):
    """One GMAP record: a Unicode value mapped to cid/gid/glyphlet gid/name."""

    def __init__(self, uv=0, cid=0, gid=0, ggid=0, name=""):
        self.UV = uv
        self.cid = cid
        self.gid = gid
        self.ggid = ggid
        self.name = name

    def toXML(self, writer, ttFont):
        writer.begintag("GMAPRecord")
        writer.newline()
        writer.simpletag("UV", value=self.UV)
        writer.newline()
        writer.simpletag("cid", value=self.cid)
        writer.newline()
        writer.simpletag("gid", value=self.gid)
        writer.newline()
        # BUG FIX: this previously wrote value=self.gid, so the glyphlet
        # gid was silently replaced by the gid on XML round-trips.
        writer.simpletag("glyphletGid", value=self.ggid)
        writer.newline()
        writer.simpletag("GlyphletName", value=self.name)
        writer.newline()
        writer.endtag("GMAPRecord")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        value = attrs["value"]
        if name == "GlyphletName":
            self.name = value
        elif name == "glyphletGid":
            # BUG FIX: map the XML element name back onto the ggid
            # attribute; a plain setattr would have created an unrelated
            # 'glyphletGid' attribute and left self.ggid untouched.
            self.ggid = safeEval(value)
        else:
            setattr(self, name, safeEval(value))

    def compile(self, ttFont):
        if self.UV is None:
            self.UV = 0
        # The name field is a fixed 32-byte, NUL-padded slot.
        nameLen = len(self.name)
        if nameLen < 32:
            self.name = self.name + "\0" * (32 - nameLen)
        data = sstruct.pack(GMAPRecordFormat1, self)
        return data

    def __repr__(self):
        return (
            "GMAPRecord[ UV: "
            + str(self.UV)
            + ", cid: "
            + str(self.cid)
            + ", gid: "
            + str(self.gid)
            + ", ggid: "
            + str(self.ggid)
            + ", Glyphlet Name: "
            + str(self.name)
            + " ]"
        )


class table_G_M_A_P_(DefaultTable.DefaultTable):
    dependencies = []

    def decompile(self, data, ttFont):
        dummy, newData = sstruct.unpack2(GMAPFormat, data, self)
        self.psFontName = tostr(newData[: self.fontNameLength])
        assert (
            self.recordsOffset % 4
        ) == 0, "GMAP error: recordsOffset is not 32 bit aligned."
        newData = data[self.recordsOffset :]
        self.gmapRecords = []
        for i in range(self.recordsCount):
            gmapRecord, newData = sstruct.unpack2(
                GMAPRecordFormat1, newData, GMAPRecord()
            )
            # Strip the fixed-width name slot's NUL padding.
            gmapRecord.name = gmapRecord.name.strip("\0")
            self.gmapRecords.append(gmapRecord)

    def compile(self, ttFont):
        self.recordsCount = len(self.gmapRecords)
        self.fontNameLength = len(self.psFontName)
        # Records start 32-bit aligned after the header and the font name.
        self.recordsOffset = 4 * (((self.fontNameLength + 12) + 3) // 4)
        data = sstruct.pack(GMAPFormat, self)
        data = data + tobytes(self.psFontName)
        data = data + b"\0" * (self.recordsOffset - len(data))
        for record in self.gmapRecords:
            data = data + record.compile(ttFont)
        return data

    def toXML(self, writer, ttFont):
        writer.comment("Most of this table will be recalculated by the compiler")
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(GMAPFormat)
        for name in names:
            value = getattr(self, name)
            writer.simpletag(name, value=value)
            writer.newline()
        writer.simpletag("PSFontName", value=self.psFontName)
        writer.newline()
        for gmapRecord in self.gmapRecords:
            gmapRecord.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        if name == "GMAPRecord":
            if not hasattr(self, "gmapRecords"):
                self.gmapRecords = []
            gmapRecord = GMAPRecord()
            self.gmapRecords.append(gmapRecord)
            for element in content:
                if isinstance(element, str):
                    continue
                name, attrs, content = element
                gmapRecord.fromXML(name, attrs, content, ttFont)
        else:
            value = attrs["value"]
            if name == "PSFontName":
                self.psFontName = value
            else:
                setattr(self, name, safeEval(value))


# --- fontTools/ttLib/tables/G_S_U_B_.py ---
from .otBase import BaseTTXConverter


class table_G_S_U_B_(BaseTTXConverter):
    # Generic OTL converter handles all (de)compilation.
    pass


# --- fontTools/ttLib/tables/H_V_A_R_.py ---
from .otBase import BaseTTXConverter


class table_H_V_A_R_(BaseTTXConverter):
    # Generic OTL converter handles all (de)compilation.
    pass


# --- fontTools/ttLib/tables/M_E_T_A_.py ---
from fontTools.misc import sstruct
from fontTools.misc.textTools import byteord, safeEval
from . import DefaultTable
import pdb  # NOTE(review): unused debug import; kept to preserve the import surface
import struct


METAHeaderFormat = """
    > # big endian
    tableVersionMajor:       H
    tableVersionMinor:       H
    metaEntriesVersionMajor: H
    metaEntriesVersionMinor: H
    unicodeVersion:          L
    metaFlags:               H
    nMetaRecs:               H
"""
# This record is followed by nMetaRecs of METAGlyphRecordFormat.
# This in turn is followed by as many METAStringRecordFormat entries
# as specified by the METAGlyphRecordFormat entries
# this is followed by the strings specified in the METAStringRecordFormat
METAGlyphRecordFormat = """
    > # big endian
    glyphID:    H
    nMetaEntry: H
"""
# This record is followed by a variable data length field:
#   USHORT or ULONG hdrOffset
# Offset from start of META table to the beginning
# of this glyph's array of ns Metadata string entries.
+# Size determined by metaFlags field +# METAGlyphRecordFormat entries must be sorted by glyph ID + +METAStringRecordFormat = """ + > # big endian + labelID: H + stringLen: H +""" +# This record is followd by a variable data length field: +# USHORT or ULONG stringOffset +# METAStringRecordFormat entries must be sorted in order of labelID +# There may be more than one entry with the same labelID +# There may be more than one strign with the same content. + +# Strings shall be Unicode UTF-8 encoded, and null-terminated. + +METALabelDict = { + 0: "MojikumiX4051", # An integer in the range 1-20 + 1: "UNIUnifiedBaseChars", + 2: "BaseFontName", + 3: "Language", + 4: "CreationDate", + 5: "FoundryName", + 6: "FoundryCopyright", + 7: "OwnerURI", + 8: "WritingScript", + 10: "StrokeCount", + 11: "IndexingRadical", +} + + +def getLabelString(labelID): + try: + label = METALabelDict[labelID] + except KeyError: + label = "Unknown label" + return str(label) + + +class table_M_E_T_A_(DefaultTable.DefaultTable): + dependencies = [] + + def decompile(self, data, ttFont): + dummy, newData = sstruct.unpack2(METAHeaderFormat, data, self) + self.glyphRecords = [] + for i in range(self.nMetaRecs): + glyphRecord, newData = sstruct.unpack2( + METAGlyphRecordFormat, newData, GlyphRecord() + ) + if self.metaFlags == 0: + [glyphRecord.offset] = struct.unpack(">H", newData[:2]) + newData = newData[2:] + elif self.metaFlags == 1: + [glyphRecord.offset] = struct.unpack(">H", newData[:4]) + newData = newData[4:] + else: + assert 0, ( + "The metaFlags field in the META table header has a value other than 0 or 1 :" + + str(self.metaFlags) + ) + glyphRecord.stringRecs = [] + newData = data[glyphRecord.offset :] + for j in range(glyphRecord.nMetaEntry): + stringRec, newData = sstruct.unpack2( + METAStringRecordFormat, newData, StringRecord() + ) + if self.metaFlags == 0: + [stringRec.offset] = struct.unpack(">H", newData[:2]) + newData = newData[2:] + else: + [stringRec.offset] = struct.unpack(">H", 
newData[:4]) + newData = newData[4:] + stringRec.string = data[ + stringRec.offset : stringRec.offset + stringRec.stringLen + ] + glyphRecord.stringRecs.append(stringRec) + self.glyphRecords.append(glyphRecord) + + def compile(self, ttFont): + offsetOK = 0 + self.nMetaRecs = len(self.glyphRecords) + count = 0 + while offsetOK != 1: + count = count + 1 + if count > 4: + pdb.set_trace() + metaData = sstruct.pack(METAHeaderFormat, self) + stringRecsOffset = len(metaData) + self.nMetaRecs * ( + 6 + 2 * (self.metaFlags & 1) + ) + stringRecSize = 6 + 2 * (self.metaFlags & 1) + for glyphRec in self.glyphRecords: + glyphRec.offset = stringRecsOffset + if (glyphRec.offset > 65535) and ((self.metaFlags & 1) == 0): + self.metaFlags = self.metaFlags + 1 + offsetOK = -1 + break + metaData = metaData + glyphRec.compile(self) + stringRecsOffset = stringRecsOffset + ( + glyphRec.nMetaEntry * stringRecSize + ) + # this will be the String Record offset for the next GlyphRecord. + if offsetOK == -1: + offsetOK = 0 + continue + + # metaData now contains the header and all of the GlyphRecords. Its length should bw + # the offset to the first StringRecord. + stringOffset = stringRecsOffset + for glyphRec in self.glyphRecords: + assert glyphRec.offset == len( + metaData + ), "Glyph record offset did not compile correctly! for rec:" + str( + glyphRec + ) + for stringRec in glyphRec.stringRecs: + stringRec.offset = stringOffset + if (stringRec.offset > 65535) and ((self.metaFlags & 1) == 0): + self.metaFlags = self.metaFlags + 1 + offsetOK = -1 + break + metaData = metaData + stringRec.compile(self) + stringOffset = stringOffset + stringRec.stringLen + if offsetOK == -1: + offsetOK = 0 + continue + + if ((self.metaFlags & 1) == 1) and (stringOffset < 65536): + self.metaFlags = self.metaFlags - 1 + continue + else: + offsetOK = 1 + + # metaData now contains the header and all of the GlyphRecords and all of the String Records. + # Its length should be the offset to the first string datum. 
+ for glyphRec in self.glyphRecords: + for stringRec in glyphRec.stringRecs: + assert stringRec.offset == len( + metaData + ), "String offset did not compile correctly! for string:" + str( + stringRec.string + ) + metaData = metaData + stringRec.string + + return metaData + + def toXML(self, writer, ttFont): + writer.comment( + "Lengths and number of entries in this table will be recalculated by the compiler" + ) + writer.newline() + formatstring, names, fixes = sstruct.getformat(METAHeaderFormat) + for name in names: + value = getattr(self, name) + writer.simpletag(name, value=value) + writer.newline() + for glyphRec in self.glyphRecords: + glyphRec.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name == "GlyphRecord": + if not hasattr(self, "glyphRecords"): + self.glyphRecords = [] + glyphRec = GlyphRecord() + self.glyphRecords.append(glyphRec) + for element in content: + if isinstance(element, str): + continue + name, attrs, content = element + glyphRec.fromXML(name, attrs, content, ttFont) + glyphRec.offset = -1 + glyphRec.nMetaEntry = len(glyphRec.stringRecs) + else: + setattr(self, name, safeEval(attrs["value"])) + + +class GlyphRecord(object): + def __init__(self): + self.glyphID = -1 + self.nMetaEntry = -1 + self.offset = -1 + self.stringRecs = [] + + def toXML(self, writer, ttFont): + writer.begintag("GlyphRecord") + writer.newline() + writer.simpletag("glyphID", value=self.glyphID) + writer.newline() + writer.simpletag("nMetaEntry", value=self.nMetaEntry) + writer.newline() + for stringRec in self.stringRecs: + stringRec.toXML(writer, ttFont) + writer.endtag("GlyphRecord") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "StringRecord": + stringRec = StringRecord() + self.stringRecs.append(stringRec) + for element in content: + if isinstance(element, str): + continue + stringRec.fromXML(name, attrs, content, ttFont) + stringRec.stringLen = len(stringRec.string) + else: + setattr(self, 
name, safeEval(attrs["value"])) + + def compile(self, parentTable): + data = sstruct.pack(METAGlyphRecordFormat, self) + if parentTable.metaFlags == 0: + datum = struct.pack(">H", self.offset) + elif parentTable.metaFlags == 1: + datum = struct.pack(">L", self.offset) + data = data + datum + return data + + def __repr__(self): + return ( + "GlyphRecord[ glyphID: " + + str(self.glyphID) + + ", nMetaEntry: " + + str(self.nMetaEntry) + + ", offset: " + + str(self.offset) + + " ]" + ) + + +# XXX The following two functions are really broken around UTF-8 vs Unicode + + +def mapXMLToUTF8(string): + uString = str() + strLen = len(string) + i = 0 + while i < strLen: + prefixLen = 0 + if string[i : i + 3] == "&#x": + prefixLen = 3 + elif string[i : i + 7] == "&#x": + prefixLen = 7 + if prefixLen: + i = i + prefixLen + j = i + while string[i] != ";": + i = i + 1 + valStr = string[j:i] + + uString = uString + chr(eval("0x" + valStr)) + else: + uString = uString + chr(byteord(string[i])) + i = i + 1 + + return uString.encode("utf_8") + + +def mapUTF8toXML(string): + uString = string.decode("utf_8") + string = "" + for uChar in uString: + i = ord(uChar) + if (i < 0x80) and (i > 0x1F): + string = string + uChar + else: + string = string + "&#x" + hex(i)[2:] + ";" + return string + + +class StringRecord(object): + def toXML(self, writer, ttFont): + writer.begintag("StringRecord") + writer.newline() + writer.simpletag("labelID", value=self.labelID) + writer.comment(getLabelString(self.labelID)) + writer.newline() + writer.newline() + writer.simpletag("string", value=mapUTF8toXML(self.string)) + writer.newline() + writer.endtag("StringRecord") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + for element in content: + if isinstance(element, str): + continue + name, attrs, content = element + value = attrs["value"] + if name == "string": + self.string = mapXMLToUTF8(value) + else: + setattr(self, name, safeEval(value)) + + def compile(self, parentTable): + 
data = sstruct.pack(METAStringRecordFormat, self) + if parentTable.metaFlags == 0: + datum = struct.pack(">H", self.offset) + elif parentTable.metaFlags == 1: + datum = struct.pack(">L", self.offset) + data = data + datum + return data + + def __repr__(self): + return ( + "StringRecord [ labelID: " + + str(self.labelID) + + " aka " + + getLabelString(self.labelID) + + ", offset: " + + str(self.offset) + + ", length: " + + str(self.stringLen) + + ", string: " + + self.string + + " ]" + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/M_V_A_R_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/M_V_A_R_.py new file mode 100644 index 0000000000000000000000000000000000000000..8371795eb2f2d2c233ec1725b8a2c21453170f23 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/M_V_A_R_.py @@ -0,0 +1,5 @@ +from .otBase import BaseTTXConverter + + +class table_M_V_A_R_(BaseTTXConverter): + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/S_V_G_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/S_V_G_.py new file mode 100644 index 0000000000000000000000000000000000000000..ebc2befdfe8540b3fdd6fa19002d708992787f5f --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/S_V_G_.py @@ -0,0 +1,215 @@ +"""Compiles/decompiles SVG table. + +https://docs.microsoft.com/en-us/typography/opentype/spec/svg + +The XML format is: + +.. code-block:: xml + + + + <complete SVG doc> ]] + </svgDoc> + ... + <svgDoc endGlyphID="n" startGlyphID="m"> + <![CDATA[ <complete SVG doc> ]] + </svgDoc> + </SVG> +""" + +from fontTools.misc.textTools import bytesjoin, safeEval, strjoin, tobytes, tostr +from fontTools.misc import sstruct +from . 
import DefaultTable +from collections.abc import Sequence +from dataclasses import dataclass, astuple +from io import BytesIO +import struct +import logging + + +log = logging.getLogger(__name__) + + +SVG_format_0 = """ + > # big endian + version: H + offsetToSVGDocIndex: L + reserved: L +""" + +SVG_format_0Size = sstruct.calcsize(SVG_format_0) + +doc_index_entry_format_0 = """ + > # big endian + startGlyphID: H + endGlyphID: H + svgDocOffset: L + svgDocLength: L +""" + +doc_index_entry_format_0Size = sstruct.calcsize(doc_index_entry_format_0) + + +class table_S_V_G_(DefaultTable.DefaultTable): + def decompile(self, data, ttFont): + self.docList = [] + # Version 0 is the standardized version of the table; and current. + # https://www.microsoft.com/typography/otspec/svg.htm + sstruct.unpack(SVG_format_0, data[:SVG_format_0Size], self) + if self.version != 0: + log.warning( + "Unknown SVG table version '%s'. Decompiling as version 0.", + self.version, + ) + # read in SVG Documents Index + # data starts with the first entry of the entry list. 
+ pos = subTableStart = self.offsetToSVGDocIndex + self.numEntries = struct.unpack(">H", data[pos : pos + 2])[0] + pos += 2 + if self.numEntries > 0: + data2 = data[pos:] + entries = [] + for i in range(self.numEntries): + record_data = data2[ + i + * doc_index_entry_format_0Size : (i + 1) + * doc_index_entry_format_0Size + ] + docIndexEntry = sstruct.unpack( + doc_index_entry_format_0, record_data, DocumentIndexEntry() + ) + entries.append(docIndexEntry) + + for entry in entries: + start = entry.svgDocOffset + subTableStart + end = start + entry.svgDocLength + doc = data[start:end] + compressed = False + if doc.startswith(b"\x1f\x8b"): + import gzip + + bytesIO = BytesIO(doc) + with gzip.GzipFile(None, "r", fileobj=bytesIO) as gunzipper: + doc = gunzipper.read() + del bytesIO + compressed = True + doc = tostr(doc, "utf_8") + self.docList.append( + SVGDocument(doc, entry.startGlyphID, entry.endGlyphID, compressed) + ) + + def compile(self, ttFont): + version = 0 + offsetToSVGDocIndex = ( + SVG_format_0Size # I start the SVGDocIndex right after the header. + ) + # get SGVDoc info. 
+ docList = [] + entryList = [] + numEntries = len(self.docList) + datum = struct.pack(">H", numEntries) + entryList.append(datum) + curOffset = len(datum) + doc_index_entry_format_0Size * numEntries + seenDocs = {} + allCompressed = getattr(self, "compressed", False) + for i, doc in enumerate(self.docList): + if isinstance(doc, (list, tuple)): + doc = SVGDocument(*doc) + self.docList[i] = doc + docBytes = tobytes(doc.data, encoding="utf_8") + if (allCompressed or doc.compressed) and not docBytes.startswith( + b"\x1f\x8b" + ): + import gzip + + bytesIO = BytesIO() + # mtime=0 strips the useless timestamp and makes gzip output reproducible; + # equivalent to `gzip -n` + with gzip.GzipFile(None, "w", fileobj=bytesIO, mtime=0) as gzipper: + gzipper.write(docBytes) + gzipped = bytesIO.getvalue() + if len(gzipped) < len(docBytes): + docBytes = gzipped + del gzipped, bytesIO + docLength = len(docBytes) + if docBytes in seenDocs: + docOffset = seenDocs[docBytes] + else: + docOffset = curOffset + curOffset += docLength + seenDocs[docBytes] = docOffset + docList.append(docBytes) + entry = struct.pack( + ">HHLL", doc.startGlyphID, doc.endGlyphID, docOffset, docLength + ) + entryList.append(entry) + entryList.extend(docList) + svgDocData = bytesjoin(entryList) + + reserved = 0 + header = struct.pack(">HLL", version, offsetToSVGDocIndex, reserved) + data = [header, svgDocData] + data = bytesjoin(data) + return data + + def toXML(self, writer, ttFont): + for i, doc in enumerate(self.docList): + if isinstance(doc, (list, tuple)): + doc = SVGDocument(*doc) + self.docList[i] = doc + attrs = {"startGlyphID": doc.startGlyphID, "endGlyphID": doc.endGlyphID} + if doc.compressed: + attrs["compressed"] = 1 + writer.begintag("svgDoc", **attrs) + writer.newline() + writer.writecdata(doc.data) + writer.newline() + writer.endtag("svgDoc") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "svgDoc": + if not hasattr(self, "docList"): + self.docList = [] + 
doc = strjoin(content) + doc = doc.strip() + startGID = int(attrs["startGlyphID"]) + endGID = int(attrs["endGlyphID"]) + compressed = bool(safeEval(attrs.get("compressed", "0"))) + self.docList.append(SVGDocument(doc, startGID, endGID, compressed)) + else: + log.warning("Unknown %s %s", name, content) + + +class DocumentIndexEntry(object): + def __init__(self): + self.startGlyphID = None # USHORT + self.endGlyphID = None # USHORT + self.svgDocOffset = None # ULONG + self.svgDocLength = None # ULONG + + def __repr__(self): + return ( + "startGlyphID: %s, endGlyphID: %s, svgDocOffset: %s, svgDocLength: %s" + % (self.startGlyphID, self.endGlyphID, self.svgDocOffset, self.svgDocLength) + ) + + +@dataclass +class SVGDocument(Sequence): + data: str + startGlyphID: int + endGlyphID: int + compressed: bool = False + + # Previously, the SVG table's docList attribute contained a lists of 3 items: + # [doc, startGlyphID, endGlyphID]; later, we added a `compressed` attribute. + # For backward compatibility with code that depends of them being sequences of + # fixed length=3, we subclass the Sequence abstract base class and pretend only + # the first three items are present. 'compressed' is only accessible via named + # attribute lookup like regular dataclasses: i.e. `doc.compressed`, not `doc[3]` + def __getitem__(self, index): + return astuple(self)[:3][index] + + def __len__(self): + return 3 diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/S__i_l_l.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/S__i_l_l.py new file mode 100644 index 0000000000000000000000000000000000000000..12b0b8f6cc55b337db857df99e27e7bb69bb5bbc --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/S__i_l_l.py @@ -0,0 +1,87 @@ +from fontTools.misc import sstruct +from fontTools.misc.fixedTools import floatToFixedToStr +from fontTools.misc.textTools import safeEval +from . import DefaultTable +from . 
import grUtils +import struct + +Sill_hdr = """ + > + version: 16.16F +""" + + +class table_S__i_l_l(DefaultTable.DefaultTable): + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.langs = {} + + def decompile(self, data, ttFont): + (_, data) = sstruct.unpack2(Sill_hdr, data, self) + self.version = float(floatToFixedToStr(self.version, precisionBits=16)) + (numLangs,) = struct.unpack(">H", data[:2]) + data = data[8:] + maxsetting = 0 + langinfo = [] + for i in range(numLangs): + (langcode, numsettings, offset) = struct.unpack( + ">4sHH", data[i * 8 : (i + 1) * 8] + ) + offset = int(offset / 8) - (numLangs + 1) + langcode = langcode.replace(b"\000", b"") + langinfo.append((langcode.decode("utf-8"), numsettings, offset)) + maxsetting = max(maxsetting, offset + numsettings) + data = data[numLangs * 8 :] + finfo = [] + for i in range(maxsetting): + (fid, val, _) = struct.unpack(">LHH", data[i * 8 : (i + 1) * 8]) + finfo.append((fid, val)) + self.langs = {} + for c, n, o in langinfo: + self.langs[c] = [] + for i in range(o, o + n): + self.langs[c].append(finfo[i]) + + def compile(self, ttFont): + ldat = b"" + fdat = b"" + offset = len(self.langs) + for c, inf in sorted(self.langs.items()): + ldat += struct.pack(">4sHH", c.encode("utf8"), len(inf), 8 * offset + 20) + for fid, val in inf: + fdat += struct.pack(">LHH", fid, val, 0) + offset += len(inf) + ldat += struct.pack(">LHH", 0x80808080, 0, 8 * offset + 20) + return ( + sstruct.pack(Sill_hdr, self) + + grUtils.bininfo(len(self.langs)) + + ldat + + fdat + ) + + def toXML(self, writer, ttFont): + writer.simpletag("version", version=self.version) + writer.newline() + for c, inf in sorted(self.langs.items()): + writer.begintag("lang", name=c) + writer.newline() + for fid, val in inf: + writer.simpletag("feature", fid=grUtils.num2tag(fid), val=val) + writer.newline() + writer.endtag("lang") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name == "version": + 
self.version = float(safeEval(attrs["version"])) + elif name == "lang": + c = attrs["name"] + self.langs[c] = [] + for element in content: + if not isinstance(element, tuple): + continue + tag, a, subcontent = element + if tag == "feature": + self.langs[c].append( + (grUtils.tag2num(a["fid"]), int(safeEval(a["val"]))) + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_C_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_C_.py new file mode 100644 index 0000000000000000000000000000000000000000..573b3f9c3970766ea817994509f4939ef4f70f0c --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_C_.py @@ -0,0 +1,5 @@ +from .otBase import BaseTTXConverter + + +class table_T_S_I_C_(BaseTTXConverter): + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_J_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_J_.py new file mode 100644 index 0000000000000000000000000000000000000000..bc8fe92aac9d18bfd5ee565588d8cebf7d00afd1 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I_J_.py @@ -0,0 +1,5 @@ +from .T_S_I_V_ import table_T_S_I_V_ + + +class table_T_S_I_J_(table_T_S_I_V_): + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__0.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__0.py new file mode 100644 index 0000000000000000000000000000000000000000..77905822a8a5f4c1016f094f41f71a365c7584c7 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__0.py @@ -0,0 +1,57 @@ +""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) +tool to store its hinting source data. + +TSI0 is the index table containing the lengths and offsets for the glyph +programs and 'extra' programs ('fpgm', 'prep', and 'cvt') that are contained +in the TSI1 table. +""" + +from . 
import DefaultTable +import struct + +tsi0Format = ">HHL" + + +def fixlongs(glyphID, textLength, textOffset): + return int(glyphID), int(textLength), textOffset + + +class table_T_S_I__0(DefaultTable.DefaultTable): + dependencies = ["TSI1"] + + def decompile(self, data, ttFont): + numGlyphs = ttFont["maxp"].numGlyphs + indices = [] + size = struct.calcsize(tsi0Format) + for i in range(numGlyphs + 5): + glyphID, textLength, textOffset = fixlongs( + *struct.unpack(tsi0Format, data[:size]) + ) + indices.append((glyphID, textLength, textOffset)) + data = data[size:] + assert len(data) == 0 + assert indices[-5] == (0xFFFE, 0, 0xABFC1F34), "bad magic number" + self.indices = indices[:-5] + self.extra_indices = indices[-4:] + + def compile(self, ttFont): + if not hasattr(self, "indices"): + # We have no corresponding table (TSI1 or TSI3); let's return + # no data, which effectively means "ignore us". + return b"" + data = b"" + for index, textLength, textOffset in self.indices: + data = data + struct.pack(tsi0Format, index, textLength, textOffset) + data = data + struct.pack(tsi0Format, 0xFFFE, 0, 0xABFC1F34) + for index, textLength, textOffset in self.extra_indices: + data = data + struct.pack(tsi0Format, index, textLength, textOffset) + return data + + def set(self, indices, extra_indices): + # gets called by 'TSI1' or 'TSI3' + self.indices = indices + self.extra_indices = extra_indices + + def toXML(self, writer, ttFont): + writer.comment("This table will be calculated by the compiler") + writer.newline() diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__2.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__2.py new file mode 100644 index 0000000000000000000000000000000000000000..163ef45226d00fa14537cb25e37601f8498a9e7b --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__2.py @@ -0,0 +1,15 @@ +""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType 
(VTT) +tool to store its hinting source data. + +TSI2 is the index table containing the lengths and offsets for the glyph +programs that are contained in the TSI3 table. It uses the same format as +the TSI0 table. +""" + +from fontTools import ttLib + +superclass = ttLib.getTableClass("TSI0") + + +class table_T_S_I__2(superclass): + dependencies = ["TSI3"] diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__3.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__3.py new file mode 100644 index 0000000000000000000000000000000000000000..604a7f0bebb6c7a3494b14f79c3f30a4dc7d0230 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/T_S_I__3.py @@ -0,0 +1,20 @@ +""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT) +tool to store its hinting source data. + +TSI3 contains the text of the glyph programs in the form of 'VTTTalk' code. +""" + +from fontTools import ttLib + +superclass = ttLib.getTableClass("TSI1") + + +class table_T_S_I__3(superclass): + extras = { + 0xFFFA: "reserved0", + 0xFFFB: "reserved1", + 0xFFFC: "reserved2", + 0xFFFD: "reserved3", + } + + indextable = "TSI2" diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/V_O_R_G_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/V_O_R_G_.py new file mode 100644 index 0000000000000000000000000000000000000000..b08737b224bc11b4bf56f7e8b41e901c861f5f69 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/V_O_R_G_.py @@ -0,0 +1,158 @@ +from fontTools.misc.textTools import bytesjoin, safeEval +from . import DefaultTable +import struct + + +class table_V_O_R_G_(DefaultTable.DefaultTable): + """This table is structured so that you can treat it like a dictionary keyed by glyph name. + + ``ttFont['VORG'][<glyphName>]`` will return the vertical origin for any glyph. 
+ + ``ttFont['VORG'][<glyphName>] = <value>`` will set the vertical origin for any glyph. + """ + + def decompile(self, data, ttFont): + self.getGlyphName = ( + ttFont.getGlyphName + ) # for use in get/set item functions, for access by GID + ( + self.majorVersion, + self.minorVersion, + self.defaultVertOriginY, + self.numVertOriginYMetrics, + ) = struct.unpack(">HHhH", data[:8]) + assert ( + self.majorVersion <= 1 + ), "Major version of VORG table is higher than I know how to handle" + data = data[8:] + vids = [] + gids = [] + pos = 0 + for i in range(self.numVertOriginYMetrics): + gid, vOrigin = struct.unpack(">Hh", data[pos : pos + 4]) + pos += 4 + gids.append(gid) + vids.append(vOrigin) + + self.VOriginRecords = vOrig = {} + glyphOrder = ttFont.getGlyphOrder() + try: + names = [glyphOrder[gid] for gid in gids] + except IndexError: + getGlyphName = self.getGlyphName + names = map(getGlyphName, gids) + + for name, vid in zip(names, vids): + vOrig[name] = vid + + def compile(self, ttFont): + vorgs = list(self.VOriginRecords.values()) + names = list(self.VOriginRecords.keys()) + nameMap = ttFont.getReverseGlyphMap() + try: + gids = [nameMap[name] for name in names] + except KeyError: + nameMap = ttFont.getReverseGlyphMap(rebuild=True) + gids = [nameMap[name] for name in names] + vOriginTable = list(zip(gids, vorgs)) + self.numVertOriginYMetrics = len(vorgs) + vOriginTable.sort() # must be in ascending GID order + dataList = [struct.pack(">Hh", rec[0], rec[1]) for rec in vOriginTable] + header = struct.pack( + ">HHhH", + self.majorVersion, + self.minorVersion, + self.defaultVertOriginY, + self.numVertOriginYMetrics, + ) + dataList.insert(0, header) + data = bytesjoin(dataList) + return data + + def toXML(self, writer, ttFont): + writer.simpletag("majorVersion", value=self.majorVersion) + writer.newline() + writer.simpletag("minorVersion", value=self.minorVersion) + writer.newline() + writer.simpletag("defaultVertOriginY", value=self.defaultVertOriginY) + 
writer.newline() + writer.simpletag("numVertOriginYMetrics", value=self.numVertOriginYMetrics) + writer.newline() + vOriginTable = [] + glyphNames = self.VOriginRecords.keys() + for glyphName in glyphNames: + try: + gid = ttFont.getGlyphID(glyphName) + except: + assert 0, ( + "VORG table contains a glyph name not in ttFont.getGlyphNames(): " + + str(glyphName) + ) + vOriginTable.append([gid, glyphName, self.VOriginRecords[glyphName]]) + vOriginTable.sort() + for entry in vOriginTable: + vOriginRec = VOriginRecord(entry[1], entry[2]) + vOriginRec.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "VOriginRecords"): + self.VOriginRecords = {} + self.getGlyphName = ( + ttFont.getGlyphName + ) # for use in get/set item functions, for access by GID + if name == "VOriginRecord": + vOriginRec = VOriginRecord() + for element in content: + if isinstance(element, str): + continue + name, attrs, content = element + vOriginRec.fromXML(name, attrs, content, ttFont) + self.VOriginRecords[vOriginRec.glyphName] = vOriginRec.vOrigin + elif "value" in attrs: + setattr(self, name, safeEval(attrs["value"])) + + def __getitem__(self, glyphSelector): + if isinstance(glyphSelector, int): + # its a gid, convert to glyph name + glyphSelector = self.getGlyphName(glyphSelector) + + if glyphSelector not in self.VOriginRecords: + return self.defaultVertOriginY + + return self.VOriginRecords[glyphSelector] + + def __setitem__(self, glyphSelector, value): + if isinstance(glyphSelector, int): + # its a gid, convert to glyph name + glyphSelector = self.getGlyphName(glyphSelector) + + if value != self.defaultVertOriginY: + self.VOriginRecords[glyphSelector] = value + elif glyphSelector in self.VOriginRecords: + del self.VOriginRecords[glyphSelector] + + def __delitem__(self, glyphSelector): + del self.VOriginRecords[glyphSelector] + + +class VOriginRecord(object): + def __init__(self, name=None, vOrigin=None): + self.glyphName = name + self.vOrigin = 
vOrigin + + def toXML(self, writer, ttFont): + writer.begintag("VOriginRecord") + writer.newline() + writer.simpletag("glyphName", value=self.glyphName) + writer.newline() + writer.simpletag("vOrigin", value=self.vOrigin) + writer.newline() + writer.endtag("VOriginRecord") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + value = attrs["value"] + if name == "glyphName": + setattr(self, name, value) + else: + setattr(self, name, safeEval(value)) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/V_V_A_R_.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/V_V_A_R_.py new file mode 100644 index 0000000000000000000000000000000000000000..a3665fea5ecc6bd4bf50b447de551994330aaca4 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/V_V_A_R_.py @@ -0,0 +1,5 @@ +from .otBase import BaseTTXConverter + + +class table_V_V_A_R_(BaseTTXConverter): + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e622f1d13496c3a387d76fe04996782d48e3752b --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__init__.py @@ -0,0 +1,97 @@ +# DON'T EDIT! This file is generated by MetaTools/buildTableList.py. +def _moduleFinderHint(): + """Dummy function to let modulefinder know what tables may be + dynamically imported. Generated by MetaTools/buildTableList.py. + + >>> _moduleFinderHint() + """ + from . import B_A_S_E_ + from . import C_B_D_T_ + from . import C_B_L_C_ + from . import C_F_F_ + from . import C_F_F__2 + from . import C_O_L_R_ + from . import C_P_A_L_ + from . import D_S_I_G_ + from . import D__e_b_g + from . import E_B_D_T_ + from . import E_B_L_C_ + from . import F_F_T_M_ + from . import F__e_a_t + from . import G_D_E_F_ + from . import G_M_A_P_ + from . 
import G_P_K_G_ + from . import G_P_O_S_ + from . import G_S_U_B_ + from . import G__l_a_t + from . import G__l_o_c + from . import H_V_A_R_ + from . import J_S_T_F_ + from . import L_T_S_H_ + from . import M_A_T_H_ + from . import M_E_T_A_ + from . import M_V_A_R_ + from . import O_S_2f_2 + from . import S_I_N_G_ + from . import S_T_A_T_ + from . import S_V_G_ + from . import S__i_l_f + from . import S__i_l_l + from . import T_S_I_B_ + from . import T_S_I_C_ + from . import T_S_I_D_ + from . import T_S_I_J_ + from . import T_S_I_P_ + from . import T_S_I_S_ + from . import T_S_I_V_ + from . import T_S_I__0 + from . import T_S_I__1 + from . import T_S_I__2 + from . import T_S_I__3 + from . import T_S_I__5 + from . import T_T_F_A_ + from . import V_A_R_C_ + from . import V_D_M_X_ + from . import V_O_R_G_ + from . import V_V_A_R_ + from . import _a_n_k_r + from . import _a_v_a_r + from . import _b_s_l_n + from . import _c_i_d_g + from . import _c_m_a_p + from . import _c_v_a_r + from . import _c_v_t + from . import _f_e_a_t + from . import _f_p_g_m + from . import _f_v_a_r + from . import _g_a_s_p + from . import _g_c_i_d + from . import _g_l_y_f + from . import _g_v_a_r + from . import _h_d_m_x + from . import _h_e_a_d + from . import _h_h_e_a + from . import _h_m_t_x + from . import _k_e_r_n + from . import _l_c_a_r + from . import _l_o_c_a + from . import _l_t_a_g + from . import _m_a_x_p + from . import _m_e_t_a + from . import _m_o_r_t + from . import _m_o_r_x + from . import _n_a_m_e + from . import _o_p_b_d + from . import _p_o_s_t + from . import _p_r_e_p + from . import _p_r_o_p + from . import _s_b_i_x + from . import _t_r_a_k + from . import _v_h_e_a + from . 
import _v_m_t_x + + +if __name__ == "__main__": + import doctest, sys + + sys.exit(doctest.testmod().failed) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_a_n_k_r.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_a_n_k_r.py new file mode 100644 index 0000000000000000000000000000000000000000..d1062ecc7bf75e3a9a346a68c2a17ae7d00a5c3f --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_a_n_k_r.py @@ -0,0 +1,14 @@ +from .otBase import BaseTTXConverter + + +class table__a_n_k_r(BaseTTXConverter): + """ + The anchor point table provides a way to define anchor points. + These are points within the coordinate space of a given glyph, + independent of the control points used to render the glyph. + Anchor points are used in conjunction with the 'kerx' table. + + See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ankr.html + """ + + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_c_i_d_g.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_c_i_d_g.py new file mode 100644 index 0000000000000000000000000000000000000000..f11901baebf12fa8671730011ef27142b7d4cc04 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_c_i_d_g.py @@ -0,0 +1,19 @@ +# coding: utf-8 +from .otBase import BaseTTXConverter + + +class table__c_i_d_g(BaseTTXConverter): + """The AAT ``cidg`` table has almost the same structure as ``gidc``, + just mapping CIDs to GlyphIDs instead of the reverse direction. + + It is useful for fonts that may be used by a PDF renderer in lieu of + a font reference with a known glyph collection but no subsetted + glyphs. For instance, a PDF can say “please use a font conforming + to Adobe-Japan-1”; the ``cidg`` mapping is necessary if the font is, + say, a TrueType font. ``gidc`` is lossy for this purpose and is + obsoleted by ``cidg``. 
+ + For example, the first font in ``/System/Library/Fonts/PingFang.ttc`` + (which Apple ships pre-installed on MacOS 10.12.6) has a ``cidg`` table.""" + + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_f_e_a_t.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_f_e_a_t.py new file mode 100644 index 0000000000000000000000000000000000000000..c9a48eff06cb14b1b2dc56c94ec7e02b80f11ca3 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_f_e_a_t.py @@ -0,0 +1,12 @@ +from .otBase import BaseTTXConverter + + +class table__f_e_a_t(BaseTTXConverter): + """The feature name table is an AAT (Apple Advanced Typography) table for + storing font features, settings, and their human-readable names. It should + not be confused with the ``Feat`` table or the OpenType Layout ``GSUB``/``GPOS`` + tables. See `Feature Name Table <https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6feat.html>`_ + in the TrueType Reference Manual for more information on the structure and + purpose of this table.""" + + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_f_p_g_m.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_f_p_g_m.py new file mode 100644 index 0000000000000000000000000000000000000000..df23041d65617af9c1f6feb00db970b7870c2268 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_f_p_g_m.py @@ -0,0 +1,49 @@ +from . import DefaultTable +from . 
import ttProgram + + +class table__f_p_g_m(DefaultTable.DefaultTable): + def decompile(self, data, ttFont): + program = ttProgram.Program() + program.fromBytecode(data) + self.program = program + + def compile(self, ttFont): + return self.program.getBytecode() + + def toXML(self, writer, ttFont): + self.program.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + program = ttProgram.Program() + program.fromXML(name, attrs, content, ttFont) + self.program = program + + def __bool__(self): + """ + >>> fpgm = table__f_p_g_m() + >>> bool(fpgm) + False + >>> p = ttProgram.Program() + >>> fpgm.program = p + >>> bool(fpgm) + False + >>> bc = bytearray([0]) + >>> p.fromBytecode(bc) + >>> bool(fpgm) + True + >>> p.bytecode.pop() + 0 + >>> bool(fpgm) + False + """ + return hasattr(self, "program") and bool(self.program) + + __nonzero__ = __bool__ + + +if __name__ == "__main__": + import sys + import doctest + + sys.exit(doctest.testmod().failed) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_f_v_a_r.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_f_v_a_r.py new file mode 100644 index 0000000000000000000000000000000000000000..a3bdacd4cc8611587a765732b0da8c176d4c60a4 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_f_v_a_r.py @@ -0,0 +1,253 @@ +from fontTools.misc import sstruct +from fontTools.misc.fixedTools import ( + fixedToFloat as fi2fl, + floatToFixed as fl2fi, + floatToFixedToStr as fl2str, + strToFixedToFloat as str2fl, +) +from fontTools.misc.textTools import Tag, bytesjoin, safeEval +from fontTools.ttLib import TTLibError +from . 
import DefaultTable +import struct + + +# Apple's documentation of 'fvar': +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6fvar.html + +FVAR_HEADER_FORMAT = """ + > # big endian + version: L + offsetToData: H + countSizePairs: H + axisCount: H + axisSize: H + instanceCount: H + instanceSize: H +""" + +FVAR_AXIS_FORMAT = """ + > # big endian + axisTag: 4s + minValue: 16.16F + defaultValue: 16.16F + maxValue: 16.16F + flags: H + axisNameID: H +""" + +FVAR_INSTANCE_FORMAT = """ + > # big endian + subfamilyNameID: H + flags: H +""" + + +class table__f_v_a_r(DefaultTable.DefaultTable): + dependencies = ["name"] + + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.axes = [] + self.instances = [] + + def compile(self, ttFont): + instanceSize = sstruct.calcsize(FVAR_INSTANCE_FORMAT) + (len(self.axes) * 4) + includePostScriptNames = any( + instance.postscriptNameID != 0xFFFF for instance in self.instances + ) + if includePostScriptNames: + instanceSize += 2 + header = { + "version": 0x00010000, + "offsetToData": sstruct.calcsize(FVAR_HEADER_FORMAT), + "countSizePairs": 2, + "axisCount": len(self.axes), + "axisSize": sstruct.calcsize(FVAR_AXIS_FORMAT), + "instanceCount": len(self.instances), + "instanceSize": instanceSize, + } + result = [sstruct.pack(FVAR_HEADER_FORMAT, header)] + result.extend([axis.compile() for axis in self.axes]) + axisTags = [axis.axisTag for axis in self.axes] + for instance in self.instances: + result.append(instance.compile(axisTags, includePostScriptNames)) + return bytesjoin(result) + + def decompile(self, data, ttFont): + header = {} + headerSize = sstruct.calcsize(FVAR_HEADER_FORMAT) + header = sstruct.unpack(FVAR_HEADER_FORMAT, data[0:headerSize]) + if header["version"] != 0x00010000: + raise TTLibError("unsupported 'fvar' version %04x" % header["version"]) + pos = header["offsetToData"] + axisSize = header["axisSize"] + for _ in range(header["axisCount"]): + axis = Axis() + 
axis.decompile(data[pos : pos + axisSize]) + self.axes.append(axis) + pos += axisSize + instanceSize = header["instanceSize"] + axisTags = [axis.axisTag for axis in self.axes] + for _ in range(header["instanceCount"]): + instance = NamedInstance() + instance.decompile(data[pos : pos + instanceSize], axisTags) + self.instances.append(instance) + pos += instanceSize + + def toXML(self, writer, ttFont): + for axis in self.axes: + axis.toXML(writer, ttFont) + for instance in self.instances: + instance.toXML(writer, ttFont) + + def fromXML(self, name, attrs, content, ttFont): + if name == "Axis": + axis = Axis() + axis.fromXML(name, attrs, content, ttFont) + self.axes.append(axis) + elif name == "NamedInstance": + instance = NamedInstance() + instance.fromXML(name, attrs, content, ttFont) + self.instances.append(instance) + + def getAxes(self): + return {a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in self.axes} + + +class Axis(object): + def __init__(self): + self.axisTag = None + self.axisNameID = 0 + self.flags = 0 + self.minValue = -1.0 + self.defaultValue = 0.0 + self.maxValue = 1.0 + + def compile(self): + return sstruct.pack(FVAR_AXIS_FORMAT, self) + + def decompile(self, data): + sstruct.unpack2(FVAR_AXIS_FORMAT, data, self) + + def toXML(self, writer, ttFont): + name = ( + ttFont["name"].getDebugName(self.axisNameID) if "name" in ttFont else None + ) + if name is not None: + writer.newline() + writer.comment(name) + writer.newline() + writer.begintag("Axis") + writer.newline() + for tag, value in [ + ("AxisTag", self.axisTag), + ("Flags", "0x%X" % self.flags), + ("MinValue", fl2str(self.minValue, 16)), + ("DefaultValue", fl2str(self.defaultValue, 16)), + ("MaxValue", fl2str(self.maxValue, 16)), + ("AxisNameID", str(self.axisNameID)), + ]: + writer.begintag(tag) + writer.write(value) + writer.endtag(tag) + writer.newline() + writer.endtag("Axis") + writer.newline() + + def fromXML(self, name, _attrs, content, ttFont): + assert name == "Axis" + for 
tag, _, value in filter(lambda t: type(t) is tuple, content): + value = "".join(value) + if tag == "AxisTag": + self.axisTag = Tag(value) + elif tag in {"Flags", "MinValue", "DefaultValue", "MaxValue", "AxisNameID"}: + setattr( + self, + tag[0].lower() + tag[1:], + str2fl(value, 16) if tag.endswith("Value") else safeEval(value), + ) + + +class NamedInstance(object): + def __init__(self): + self.subfamilyNameID = 0 + self.postscriptNameID = 0xFFFF + self.flags = 0 + self.coordinates = {} + + def compile(self, axisTags, includePostScriptName): + result = [sstruct.pack(FVAR_INSTANCE_FORMAT, self)] + for axis in axisTags: + fixedCoord = fl2fi(self.coordinates[axis], 16) + result.append(struct.pack(">l", fixedCoord)) + if includePostScriptName: + result.append(struct.pack(">H", self.postscriptNameID)) + return bytesjoin(result) + + def decompile(self, data, axisTags): + sstruct.unpack2(FVAR_INSTANCE_FORMAT, data, self) + pos = sstruct.calcsize(FVAR_INSTANCE_FORMAT) + for axis in axisTags: + value = struct.unpack(">l", data[pos : pos + 4])[0] + self.coordinates[axis] = fi2fl(value, 16) + pos += 4 + if pos + 2 <= len(data): + self.postscriptNameID = struct.unpack(">H", data[pos : pos + 2])[0] + else: + self.postscriptNameID = 0xFFFF + + def toXML(self, writer, ttFont): + name = ( + ttFont["name"].getDebugName(self.subfamilyNameID) + if "name" in ttFont + else None + ) + if name is not None: + writer.newline() + writer.comment(name) + writer.newline() + psname = ( + ttFont["name"].getDebugName(self.postscriptNameID) + if "name" in ttFont + else None + ) + if psname is not None: + writer.comment("PostScript: " + psname) + writer.newline() + if self.postscriptNameID == 0xFFFF: + writer.begintag( + "NamedInstance", + flags=("0x%X" % self.flags), + subfamilyNameID=self.subfamilyNameID, + ) + else: + writer.begintag( + "NamedInstance", + flags=("0x%X" % self.flags), + subfamilyNameID=self.subfamilyNameID, + postscriptNameID=self.postscriptNameID, + ) + writer.newline() + for 
axis in ttFont["fvar"].axes: + writer.simpletag( + "coord", + axis=axis.axisTag, + value=fl2str(self.coordinates[axis.axisTag], 16), + ) + writer.newline() + writer.endtag("NamedInstance") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + assert name == "NamedInstance" + self.subfamilyNameID = safeEval(attrs["subfamilyNameID"]) + self.flags = safeEval(attrs.get("flags", "0")) + if "postscriptNameID" in attrs: + self.postscriptNameID = safeEval(attrs["postscriptNameID"]) + else: + self.postscriptNameID = 0xFFFF + + for tag, elementAttrs, _ in filter(lambda t: type(t) is tuple, content): + if tag == "coord": + value = str2fl(elementAttrs["value"], 16) + self.coordinates[elementAttrs["axis"]] = value diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_g_a_s_p.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_g_a_s_p.py new file mode 100644 index 0000000000000000000000000000000000000000..10c32a87f4b2cbedac5e346c6f5d578cb7a6b65d --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_g_a_s_p.py @@ -0,0 +1,55 @@ +from fontTools.misc.textTools import safeEval +from . 
import DefaultTable +import struct + + +GASP_SYMMETRIC_GRIDFIT = 0x0004 +GASP_SYMMETRIC_SMOOTHING = 0x0008 +GASP_DOGRAY = 0x0002 +GASP_GRIDFIT = 0x0001 + + +class table__g_a_s_p(DefaultTable.DefaultTable): + def decompile(self, data, ttFont): + self.version, numRanges = struct.unpack(">HH", data[:4]) + assert 0 <= self.version <= 1, "unknown 'gasp' format: %s" % self.version + data = data[4:] + self.gaspRange = {} + for i in range(numRanges): + rangeMaxPPEM, rangeGaspBehavior = struct.unpack(">HH", data[:4]) + self.gaspRange[int(rangeMaxPPEM)] = int(rangeGaspBehavior) + data = data[4:] + assert not data, "too much data" + + def compile(self, ttFont): + version = 0 # ignore self.version + numRanges = len(self.gaspRange) + data = b"" + items = sorted(self.gaspRange.items()) + for rangeMaxPPEM, rangeGaspBehavior in items: + data = data + struct.pack(">HH", rangeMaxPPEM, rangeGaspBehavior) + if rangeGaspBehavior & ~(GASP_GRIDFIT | GASP_DOGRAY): + version = 1 + data = struct.pack(">HH", version, numRanges) + data + return data + + def toXML(self, writer, ttFont): + items = sorted(self.gaspRange.items()) + for rangeMaxPPEM, rangeGaspBehavior in items: + writer.simpletag( + "gaspRange", + [ + ("rangeMaxPPEM", rangeMaxPPEM), + ("rangeGaspBehavior", rangeGaspBehavior), + ], + ) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name != "gaspRange": + return + if not hasattr(self, "gaspRange"): + self.gaspRange = {} + self.gaspRange[safeEval(attrs["rangeMaxPPEM"])] = safeEval( + attrs["rangeGaspBehavior"] + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_g_l_y_f.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_g_l_y_f.py new file mode 100644 index 0000000000000000000000000000000000000000..fa11cf8f47179ed52d0887a3dcb5e9c66d1274fc --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_g_l_y_f.py @@ -0,0 +1,2222 @@ +"""_g_l_y_f.py -- Converter classes for the 
'glyf' table.""" + +from collections import namedtuple +from fontTools.misc import sstruct +from fontTools import ttLib +from fontTools import version +from fontTools.misc.transform import DecomposedTransform +from fontTools.misc.textTools import tostr, safeEval, pad +from fontTools.misc.arrayTools import updateBounds, pointInRect +from fontTools.misc.bezierTools import calcQuadraticBounds +from fontTools.misc.fixedTools import ( + fixedToFloat as fi2fl, + floatToFixed as fl2fi, + floatToFixedToStr as fl2str, + strToFixedToFloat as str2fl, +) +from fontTools.misc.roundTools import noRound, otRound +from fontTools.misc.vector import Vector +from numbers import Number +from . import DefaultTable +from . import ttProgram +import sys +import struct +import array +import logging +import math +import os +from fontTools.misc import xmlWriter +from fontTools.misc.filenames import userNameToFileName +from fontTools.misc.loggingTools import deprecateFunction +from enum import IntFlag +from functools import partial +from types import SimpleNamespace +from typing import Set + +log = logging.getLogger(__name__) + +# We compute the version the same as is computed in ttlib/__init__ +# so that we can write 'ttLibVersion' attribute of the glyf TTX files +# when glyf is written to separate files. +version = ".".join(version.split(".")[:2]) + +# +# The Apple and MS rasterizers behave differently for +# scaled composite components: one does scale first and then translate +# and the other does it vice versa. MS defined some flags to indicate +# the difference, but it seems nobody actually _sets_ those flags. +# +# Funny thing: Apple seems to _only_ do their thing in the +# WE_HAVE_A_SCALE (eg. Chicago) case, and not when it's WE_HAVE_AN_X_AND_Y_SCALE +# (eg. Charcoal)... 
+# +SCALE_COMPONENT_OFFSET_DEFAULT = 0 # 0 == MS, 1 == Apple + + +class table__g_l_y_f(DefaultTable.DefaultTable): + """Glyph Data Table + + This class represents the `glyf <https://docs.microsoft.com/en-us/typography/opentype/spec/glyf>`_ + table, which contains outlines for glyphs in TrueType format. In many cases, + it is easier to access and manipulate glyph outlines through the ``GlyphSet`` + object returned from :py:meth:`fontTools.ttLib.ttFont.getGlyphSet`:: + + >> from fontTools.pens.boundsPen import BoundsPen + >> glyphset = font.getGlyphSet() + >> bp = BoundsPen(glyphset) + >> glyphset["A"].draw(bp) + >> bp.bounds + (19, 0, 633, 716) + + However, this class can be used for low-level access to the ``glyf`` table data. + Objects of this class support dictionary-like access, mapping glyph names to + :py:class:`Glyph` objects:: + + >> glyf = font["glyf"] + >> len(glyf["Aacute"].components) + 2 + + Note that when adding glyphs to the font via low-level access to the ``glyf`` + table, the new glyphs must also be added to the ``hmtx``/``vmtx`` table:: + + >> font["glyf"]["divisionslash"] = Glyph() + >> font["hmtx"]["divisionslash"] = (640, 0) + + """ + + dependencies = ["fvar"] + + # this attribute controls the amount of padding applied to glyph data upon compile. + # Glyph lenghts are aligned to multiples of the specified value. + # Allowed values are (0, 1, 2, 4). '0' means no padding; '1' (default) also means + # no padding, except for when padding would allow to use short loca offsets. 
+ padding = 1 + + def decompile(self, data, ttFont): + self.axisTags = ( + [axis.axisTag for axis in ttFont["fvar"].axes] if "fvar" in ttFont else [] + ) + loca = ttFont["loca"] + pos = int(loca[0]) + nextPos = 0 + noname = 0 + self.glyphs = {} + self.glyphOrder = glyphOrder = ttFont.getGlyphOrder() + self._reverseGlyphOrder = {} + for i in range(0, len(loca) - 1): + try: + glyphName = glyphOrder[i] + except IndexError: + noname = noname + 1 + glyphName = "ttxautoglyph%s" % i + nextPos = int(loca[i + 1]) + glyphdata = data[pos:nextPos] + if len(glyphdata) != (nextPos - pos): + raise ttLib.TTLibError("not enough 'glyf' table data") + glyph = Glyph(glyphdata) + self.glyphs[glyphName] = glyph + pos = nextPos + if len(data) - nextPos >= 4: + log.warning( + "too much 'glyf' table data: expected %d, received %d bytes", + nextPos, + len(data), + ) + if noname: + log.warning("%s glyphs have no name", noname) + if ttFont.lazy is False: # Be lazy for None and True + self.ensureDecompiled() + + def ensureDecompiled(self, recurse=False): + # The recurse argument is unused, but part of the signature of + # ensureDecompiled across the library. 
+ for glyph in self.glyphs.values(): + glyph.expand(self) + + def compile(self, ttFont): + self.axisTags = ( + [axis.axisTag for axis in ttFont["fvar"].axes] if "fvar" in ttFont else [] + ) + if not hasattr(self, "glyphOrder"): + self.glyphOrder = ttFont.getGlyphOrder() + padding = self.padding + assert padding in (0, 1, 2, 4) + locations = [] + currentLocation = 0 + dataList = [] + recalcBBoxes = ttFont.recalcBBoxes + boundsDone = set() + for glyphName in self.glyphOrder: + glyph = self.glyphs[glyphName] + glyphData = glyph.compile(self, recalcBBoxes, boundsDone=boundsDone) + if padding > 1: + glyphData = pad(glyphData, size=padding) + locations.append(currentLocation) + currentLocation = currentLocation + len(glyphData) + dataList.append(glyphData) + locations.append(currentLocation) + + if padding == 1 and currentLocation < 0x20000: + # See if we can pad any odd-lengthed glyphs to allow loca + # table to use the short offsets. + indices = [ + i for i, glyphData in enumerate(dataList) if len(glyphData) % 2 == 1 + ] + if indices and currentLocation + len(indices) < 0x20000: + # It fits. Do it. + for i in indices: + dataList[i] += b"\0" + currentLocation = 0 + for i, glyphData in enumerate(dataList): + locations[i] = currentLocation + currentLocation += len(glyphData) + locations[len(dataList)] = currentLocation + + data = b"".join(dataList) + if "loca" in ttFont: + ttFont["loca"].set(locations) + if "maxp" in ttFont: + ttFont["maxp"].numGlyphs = len(self.glyphs) + if not data: + # As a special case when all glyph in the font are empty, add a zero byte + # to the table, so that OTS doesn’t reject it, and to make the table work + # on Windows as well. + # See https://github.com/khaledhosny/ots/issues/52 + data = b"\0" + return data + + def toXML(self, writer, ttFont, splitGlyphs=False): + notice = ( + "The xMin, yMin, xMax and yMax values\n" + "will be recalculated by the compiler." 
+ ) + glyphNames = ttFont.getGlyphNames() + if not splitGlyphs: + writer.newline() + writer.comment(notice) + writer.newline() + writer.newline() + numGlyphs = len(glyphNames) + if splitGlyphs: + path, ext = os.path.splitext(writer.file.name) + existingGlyphFiles = set() + for glyphName in glyphNames: + glyph = self.get(glyphName) + if glyph is None: + log.warning("glyph '%s' does not exist in glyf table", glyphName) + continue + if glyph.numberOfContours: + if splitGlyphs: + glyphPath = userNameToFileName( + tostr(glyphName, "utf-8"), + existingGlyphFiles, + prefix=path + ".", + suffix=ext, + ) + existingGlyphFiles.add(glyphPath.lower()) + glyphWriter = xmlWriter.XMLWriter( + glyphPath, + idlefunc=writer.idlefunc, + newlinestr=writer.newlinestr, + ) + glyphWriter.begintag("ttFont", ttLibVersion=version) + glyphWriter.newline() + glyphWriter.begintag("glyf") + glyphWriter.newline() + glyphWriter.comment(notice) + glyphWriter.newline() + writer.simpletag("TTGlyph", src=os.path.basename(glyphPath)) + else: + glyphWriter = writer + glyphWriter.begintag( + "TTGlyph", + [ + ("name", glyphName), + ("xMin", glyph.xMin), + ("yMin", glyph.yMin), + ("xMax", glyph.xMax), + ("yMax", glyph.yMax), + ], + ) + glyphWriter.newline() + glyph.toXML(glyphWriter, ttFont) + glyphWriter.endtag("TTGlyph") + glyphWriter.newline() + if splitGlyphs: + glyphWriter.endtag("glyf") + glyphWriter.newline() + glyphWriter.endtag("ttFont") + glyphWriter.newline() + glyphWriter.close() + else: + writer.simpletag("TTGlyph", name=glyphName) + writer.comment("contains no outline data") + if not splitGlyphs: + writer.newline() + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if name != "TTGlyph": + return + if not hasattr(self, "glyphs"): + self.glyphs = {} + if not hasattr(self, "glyphOrder"): + self.glyphOrder = ttFont.getGlyphOrder() + glyphName = attrs["name"] + log.debug("unpacking glyph '%s'", glyphName) + glyph = Glyph() + for attr in ["xMin", "yMin", "xMax", "yMax"]: + 
setattr(glyph, attr, safeEval(attrs.get(attr, "0"))) + self.glyphs[glyphName] = glyph + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + glyph.fromXML(name, attrs, content, ttFont) + if not ttFont.recalcBBoxes: + glyph.compact(self, 0) + + def setGlyphOrder(self, glyphOrder): + """Sets the glyph order + + Args: + glyphOrder ([str]): List of glyph names in order. + """ + self.glyphOrder = glyphOrder + self._reverseGlyphOrder = {} + + def getGlyphName(self, glyphID): + """Returns the name for the glyph with the given ID. + + Raises a ``KeyError`` if the glyph name is not found in the font. + """ + return self.glyphOrder[glyphID] + + def _buildReverseGlyphOrderDict(self): + self._reverseGlyphOrder = d = {} + for glyphID, glyphName in enumerate(self.glyphOrder): + d[glyphName] = glyphID + + def getGlyphID(self, glyphName): + """Returns the ID of the glyph with the given name. + + Raises a ``ValueError`` if the glyph is not found in the font. + """ + glyphOrder = self.glyphOrder + id = getattr(self, "_reverseGlyphOrder", {}).get(glyphName) + if id is None or id >= len(glyphOrder) or glyphOrder[id] != glyphName: + self._buildReverseGlyphOrderDict() + id = self._reverseGlyphOrder.get(glyphName) + if id is None: + raise ValueError(glyphName) + return id + + def removeHinting(self): + """Removes TrueType hints from all glyphs in the glyphset. + + See :py:meth:`Glyph.removeHinting`. 
+ """ + for glyph in self.glyphs.values(): + glyph.removeHinting() + + def keys(self): + return self.glyphs.keys() + + def has_key(self, glyphName): + return glyphName in self.glyphs + + __contains__ = has_key + + def get(self, glyphName, default=None): + glyph = self.glyphs.get(glyphName, default) + if glyph is not None: + glyph.expand(self) + return glyph + + def __getitem__(self, glyphName): + glyph = self.glyphs[glyphName] + glyph.expand(self) + return glyph + + def __setitem__(self, glyphName, glyph): + self.glyphs[glyphName] = glyph + if glyphName not in self.glyphOrder: + self.glyphOrder.append(glyphName) + + def __delitem__(self, glyphName): + del self.glyphs[glyphName] + self.glyphOrder.remove(glyphName) + + def __len__(self): + assert len(self.glyphOrder) == len(self.glyphs) + return len(self.glyphs) + + def _getPhantomPoints(self, glyphName, hMetrics, vMetrics=None): + """Compute the four "phantom points" for the given glyph from its bounding box + and the horizontal and vertical advance widths and sidebearings stored in the + ttFont's "hmtx" and "vmtx" tables. + + 'hMetrics' should be ttFont['hmtx'].metrics. + + 'vMetrics' should be ttFont['vmtx'].metrics if there is "vmtx" or None otherwise. + If there is no vMetrics passed in, vertical phantom points are set to the zero coordinate. 
+ + https://docs.microsoft.com/en-us/typography/opentype/spec/tt_instructing_glyphs#phantoms + """ + glyph = self[glyphName] + if not hasattr(glyph, "xMin"): + glyph.recalcBounds(self) + + horizontalAdvanceWidth, leftSideBearing = hMetrics[glyphName] + leftSideX = glyph.xMin - leftSideBearing + rightSideX = leftSideX + horizontalAdvanceWidth + + if vMetrics: + verticalAdvanceWidth, topSideBearing = vMetrics[glyphName] + topSideY = topSideBearing + glyph.yMax + bottomSideY = topSideY - verticalAdvanceWidth + else: + bottomSideY = topSideY = 0 + + return [ + (leftSideX, 0), + (rightSideX, 0), + (0, topSideY), + (0, bottomSideY), + ] + + def _getCoordinatesAndControls( + self, glyphName, hMetrics, vMetrics=None, *, round=otRound + ): + """Return glyph coordinates and controls as expected by "gvar" table. + + The coordinates includes four "phantom points" for the glyph metrics, + as mandated by the "gvar" spec. + + The glyph controls is a namedtuple with the following attributes: + - numberOfContours: -1 for composite glyphs. + - endPts: list of indices of end points for each contour in simple + glyphs, or component indices in composite glyphs (used for IUP + optimization). + - flags: array of contour point flags for simple glyphs (None for + composite glyphs). + - components: list of base glyph names (str) for each component in + composite glyphs (None for simple glyphs). + + The "hMetrics" and vMetrics are used to compute the "phantom points" (see + the "_getPhantomPoints" method). + + Return None if the requested glyphName is not present. 
+ """ + glyph = self.get(glyphName) + if glyph is None: + return None + if glyph.isComposite(): + coords = GlyphCoordinates( + [(getattr(c, "x", 0), getattr(c, "y", 0)) for c in glyph.components] + ) + controls = _GlyphControls( + numberOfContours=glyph.numberOfContours, + endPts=list(range(len(glyph.components))), + flags=None, + components=[ + (c.glyphName, getattr(c, "transform", None)) + for c in glyph.components + ], + ) + else: + coords, endPts, flags = glyph.getCoordinates(self) + coords = coords.copy() + controls = _GlyphControls( + numberOfContours=glyph.numberOfContours, + endPts=endPts, + flags=flags, + components=None, + ) + # Add phantom points for (left, right, top, bottom) positions. + phantomPoints = self._getPhantomPoints(glyphName, hMetrics, vMetrics) + coords.extend(phantomPoints) + coords.toInt(round=round) + return coords, controls + + def _setCoordinates(self, glyphName, coord, hMetrics, vMetrics=None): + """Set coordinates and metrics for the given glyph. + + "coord" is an array of GlyphCoordinates which must include the "phantom + points" as the last four coordinates. + + Both the horizontal/vertical advances and left/top sidebearings in "hmtx" + and "vmtx" tables (if any) are updated from four phantom points and + the glyph's bounding boxes. + + The "hMetrics" and vMetrics are used to propagate "phantom points" + into "hmtx" and "vmtx" tables if desired. (see the "_getPhantomPoints" + method). + """ + glyph = self[glyphName] + + # Handle phantom points for (left, right, top, bottom) positions. 
+ assert len(coord) >= 4 + leftSideX = coord[-4][0] + rightSideX = coord[-3][0] + topSideY = coord[-2][1] + bottomSideY = coord[-1][1] + + coord = coord[:-4] + + if glyph.isComposite(): + assert len(coord) == len(glyph.components) + for p, comp in zip(coord, glyph.components): + if hasattr(comp, "x"): + comp.x, comp.y = p + elif glyph.numberOfContours == 0: + assert len(coord) == 0 + else: + assert len(coord) == len(glyph.coordinates) + glyph.coordinates = GlyphCoordinates(coord) + + glyph.recalcBounds(self, boundsDone=set()) + + horizontalAdvanceWidth = otRound(rightSideX - leftSideX) + if horizontalAdvanceWidth < 0: + # unlikely, but it can happen, see: + # https://github.com/fonttools/fonttools/pull/1198 + horizontalAdvanceWidth = 0 + leftSideBearing = otRound(glyph.xMin - leftSideX) + hMetrics[glyphName] = horizontalAdvanceWidth, leftSideBearing + + if vMetrics is not None: + verticalAdvanceWidth = otRound(topSideY - bottomSideY) + if verticalAdvanceWidth < 0: # unlikely but do the same as horizontal + verticalAdvanceWidth = 0 + topSideBearing = otRound(topSideY - glyph.yMax) + vMetrics[glyphName] = verticalAdvanceWidth, topSideBearing + + # Deprecated + + def _synthesizeVMetrics(self, glyphName, ttFont, defaultVerticalOrigin): + """This method is wrong and deprecated. 
+ For rationale see: + https://github.com/fonttools/fonttools/pull/2266/files#r613569473 + """ + vMetrics = getattr(ttFont.get("vmtx"), "metrics", None) + if vMetrics is None: + verticalAdvanceWidth = ttFont["head"].unitsPerEm + topSideY = getattr(ttFont.get("hhea"), "ascent", None) + if topSideY is None: + if defaultVerticalOrigin is not None: + topSideY = defaultVerticalOrigin + else: + topSideY = verticalAdvanceWidth + glyph = self[glyphName] + glyph.recalcBounds(self) + topSideBearing = otRound(topSideY - glyph.yMax) + vMetrics = {glyphName: (verticalAdvanceWidth, topSideBearing)} + return vMetrics + + @deprecateFunction("use '_getPhantomPoints' instead", category=DeprecationWarning) + def getPhantomPoints(self, glyphName, ttFont, defaultVerticalOrigin=None): + """Old public name for self._getPhantomPoints(). + See: https://github.com/fonttools/fonttools/pull/2266""" + hMetrics = ttFont["hmtx"].metrics + vMetrics = self._synthesizeVMetrics(glyphName, ttFont, defaultVerticalOrigin) + return self._getPhantomPoints(glyphName, hMetrics, vMetrics) + + @deprecateFunction( + "use '_getCoordinatesAndControls' instead", category=DeprecationWarning + ) + def getCoordinatesAndControls(self, glyphName, ttFont, defaultVerticalOrigin=None): + """Old public name for self._getCoordinatesAndControls(). + See: https://github.com/fonttools/fonttools/pull/2266""" + hMetrics = ttFont["hmtx"].metrics + vMetrics = self._synthesizeVMetrics(glyphName, ttFont, defaultVerticalOrigin) + return self._getCoordinatesAndControls(glyphName, hMetrics, vMetrics) + + @deprecateFunction("use '_setCoordinates' instead", category=DeprecationWarning) + def setCoordinates(self, glyphName, ttFont): + """Old public name for self._setCoordinates(). 
+ See: https://github.com/fonttools/fonttools/pull/2266""" + hMetrics = ttFont["hmtx"].metrics + vMetrics = getattr(ttFont.get("vmtx"), "metrics", None) + self._setCoordinates(glyphName, hMetrics, vMetrics) + + +_GlyphControls = namedtuple( + "_GlyphControls", "numberOfContours endPts flags components" +) + + +glyphHeaderFormat = """ + > # big endian + numberOfContours: h + xMin: h + yMin: h + xMax: h + yMax: h +""" + +# flags +flagOnCurve = 0x01 +flagXShort = 0x02 +flagYShort = 0x04 +flagRepeat = 0x08 +flagXsame = 0x10 +flagYsame = 0x20 +flagOverlapSimple = 0x40 +flagCubic = 0x80 + +# These flags are kept for XML output after decompiling the coordinates +keepFlags = flagOnCurve + flagOverlapSimple + flagCubic + +_flagSignBytes = { + 0: 2, + flagXsame: 0, + flagXShort | flagXsame: +1, + flagXShort: -1, + flagYsame: 0, + flagYShort | flagYsame: +1, + flagYShort: -1, +} + + +def flagBest(x, y, onCurve): + """For a given x,y delta pair, returns the flag that packs this pair + most efficiently, as well as the number of byte cost of such flag.""" + + flag = flagOnCurve if onCurve else 0 + cost = 0 + # do x + if x == 0: + flag = flag | flagXsame + elif -255 <= x <= 255: + flag = flag | flagXShort + if x > 0: + flag = flag | flagXsame + cost += 1 + else: + cost += 2 + # do y + if y == 0: + flag = flag | flagYsame + elif -255 <= y <= 255: + flag = flag | flagYShort + if y > 0: + flag = flag | flagYsame + cost += 1 + else: + cost += 2 + return flag, cost + + +def flagFits(newFlag, oldFlag, mask): + newBytes = _flagSignBytes[newFlag & mask] + oldBytes = _flagSignBytes[oldFlag & mask] + return newBytes == oldBytes or abs(newBytes) > abs(oldBytes) + + +def flagSupports(newFlag, oldFlag): + return ( + (oldFlag & flagOnCurve) == (newFlag & flagOnCurve) + and flagFits(newFlag, oldFlag, flagXsame | flagXShort) + and flagFits(newFlag, oldFlag, flagYsame | flagYShort) + ) + + +def flagEncodeCoord(flag, mask, coord, coordBytes): + byteCount = _flagSignBytes[flag & mask] + if 
byteCount == 1: + coordBytes.append(coord) + elif byteCount == -1: + coordBytes.append(-coord) + elif byteCount == 2: + coordBytes.extend(struct.pack(">h", coord)) + + +def flagEncodeCoords(flag, x, y, xBytes, yBytes): + flagEncodeCoord(flag, flagXsame | flagXShort, x, xBytes) + flagEncodeCoord(flag, flagYsame | flagYShort, y, yBytes) + + +ARG_1_AND_2_ARE_WORDS = 0x0001 # if set args are words otherwise they are bytes +ARGS_ARE_XY_VALUES = 0x0002 # if set args are xy values, otherwise they are points +ROUND_XY_TO_GRID = 0x0004 # for the xy values if above is true +WE_HAVE_A_SCALE = 0x0008 # Sx = Sy, otherwise scale == 1.0 +NON_OVERLAPPING = 0x0010 # set to same value for all components (obsolete!) +MORE_COMPONENTS = 0x0020 # indicates at least one more glyph after this one +WE_HAVE_AN_X_AND_Y_SCALE = 0x0040 # Sx, Sy +WE_HAVE_A_TWO_BY_TWO = 0x0080 # t00, t01, t10, t11 +WE_HAVE_INSTRUCTIONS = 0x0100 # instructions follow +USE_MY_METRICS = 0x0200 # apply these metrics to parent glyph +OVERLAP_COMPOUND = 0x0400 # used by Apple in GX fonts +SCALED_COMPONENT_OFFSET = 0x0800 # composite designed to have the component offset scaled (designed for Apple) +UNSCALED_COMPONENT_OFFSET = 0x1000 # composite designed not to have the component offset scaled (designed for MS) + + +CompositeMaxpValues = namedtuple( + "CompositeMaxpValues", ["nPoints", "nContours", "maxComponentDepth"] +) + + +class Glyph(object): + """This class represents an individual TrueType glyph. + + TrueType glyph objects come in two flavours: simple and composite. Simple + glyph objects contain contours, represented via the ``.coordinates``, + ``.flags``, ``.numberOfContours``, and ``.endPtsOfContours`` attributes; + composite glyphs contain components, available through the ``.components`` + attributes. 

    Because the ``.coordinates`` attribute (and other simple glyph attributes mentioned
    above) is only set on simple glyphs and the ``.components`` attribute is only
    set on composite glyphs, it is necessary to use the :py:meth:`isComposite`
    method to test whether a glyph is simple or composite before attempting to
    access its data.

    For a composite glyph, the components can also be accessed via array-like access::

            >> assert(font["glyf"]["Aacute"].isComposite())
            >> font["glyf"]["Aacute"][0]
            <fontTools.ttLib.tables._g_l_y_f.GlyphComponent at 0x1027b2ee0>

    """

    def __init__(self, data=b""):
        """Store raw binary glyph data; decompilation is deferred to expand()."""
        if not data:
            # empty char
            self.numberOfContours = 0
            return
        self.data = data

    def compact(self, glyfTable, recalcBBoxes=True):
        """Recompile the glyph and drop all decompiled attributes, keeping only
        the raw ``data`` bytes (the inverse of expand())."""
        data = self.compile(glyfTable, recalcBBoxes)
        self.__dict__.clear()
        self.data = data

    def expand(self, glyfTable):
        """Decompile ``self.data`` into header fields plus contours/components.

        No-op if the glyph has already been unpacked (no ``data`` attribute).
        """
        if not hasattr(self, "data"):
            # already unpacked
            return
        if not self.data:
            # empty char
            del self.data
            self.numberOfContours = 0
            return
        # unpack the glyph header (numberOfContours and bounding box) into self
        dummy, data = sstruct.unpack2(glyphHeaderFormat, self.data, self)
        del self.data
        # Some fonts (eg. Neirizi.ttf) have a 0 for numberOfContours in
        # some glyphs; decompileCoordinates assumes that there's at least
        # one, so short-circuit here.
        if self.numberOfContours == 0:
            return
        if self.isComposite():
            self.decompileComponents(data, glyfTable)
        else:
            self.decompileCoordinates(data)

    def compile(self, glyfTable, recalcBBoxes=True, *, boundsDone=None):
        """Return the binary (glyf-table) representation of the glyph.

        When ``recalcBBoxes`` is true, the bounding box is recomputed first.
        ``boundsDone`` is an optional set of glyph names whose bounds are
        already up to date, forwarded to recalcBounds().
        """
        if hasattr(self, "data"):
            if recalcBBoxes:
                # must unpack glyph in order to recalculate bounding box
                self.expand(glyfTable)
            else:
                # still compacted and no recalc requested: reuse raw bytes as-is
                return self.data
        if self.numberOfContours == 0:
            return b""

        if recalcBBoxes:
            self.recalcBounds(glyfTable, boundsDone=boundsDone)

        data = sstruct.pack(glyphHeaderFormat, self)
        if self.isComposite():
            data = data + self.compileComponents(glyfTable)
        else:
            data = data + self.compileCoordinates()
        return data

    def toXML(self, writer, ttFont):
        """Write the glyph to TTX XML: component elements for composites, or
        one <contour> per contour with <pt> children, plus <instructions>."""
        if self.isComposite():
            for compo in self.components:
                compo.toXML(writer, ttFont)
            haveInstructions = hasattr(self, "program")
        else:
            last = 0
            for i in range(self.numberOfContours):
                writer.begintag("contour")
                writer.newline()
                for j in range(last, self.endPtsOfContours[i] + 1):
                    attrs = [
                        ("x", self.coordinates[j][0]),
                        ("y", self.coordinates[j][1]),
                        ("on", self.flags[j] & flagOnCurve),
                    ]
                    if self.flags[j] & flagOverlapSimple:
                        # Apple's rasterizer uses flagOverlapSimple in the first contour/first pt to flag glyphs that contain overlapping contours
                        attrs.append(("overlap", 1))
                    if self.flags[j] & flagCubic:
                        attrs.append(("cubic", 1))
                    writer.simpletag("pt", attrs)
                    writer.newline()
                last = self.endPtsOfContours[i] + 1
                writer.endtag("contour")
                writer.newline()
            # simple glyphs carry instructions whenever they have contours
            haveInstructions = self.numberOfContours > 0
        if haveInstructions:
            if self.program:
                writer.begintag("instructions")
                writer.newline()
                self.program.toXML(writer, ttFont)
                writer.endtag("instructions")
            else:
                # empty program: self-closing tag
                writer.simpletag("instructions")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Populate the glyph from a TTX XML element ("contour", "component" or
        "instructions"); contours and components are mutually exclusive."""
        if name == "contour":
            if self.numberOfContours < 0:
                raise ttLib.TTLibError("can't mix composites and contours in glyph")
self.numberOfContours = self.numberOfContours + 1 + coordinates = GlyphCoordinates() + flags = bytearray() + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + if name != "pt": + continue # ignore anything but "pt" + coordinates.append((safeEval(attrs["x"]), safeEval(attrs["y"]))) + flag = bool(safeEval(attrs["on"])) + if "overlap" in attrs and bool(safeEval(attrs["overlap"])): + flag |= flagOverlapSimple + if "cubic" in attrs and bool(safeEval(attrs["cubic"])): + flag |= flagCubic + flags.append(flag) + if not hasattr(self, "coordinates"): + self.coordinates = coordinates + self.flags = flags + self.endPtsOfContours = [len(coordinates) - 1] + else: + self.coordinates.extend(coordinates) + self.flags.extend(flags) + self.endPtsOfContours.append(len(self.coordinates) - 1) + elif name == "component": + if self.numberOfContours > 0: + raise ttLib.TTLibError("can't mix composites and contours in glyph") + self.numberOfContours = -1 + if not hasattr(self, "components"): + self.components = [] + component = GlyphComponent() + self.components.append(component) + component.fromXML(name, attrs, content, ttFont) + elif name == "instructions": + self.program = ttProgram.Program() + for element in content: + if not isinstance(element, tuple): + continue + name, attrs, content = element + self.program.fromXML(name, attrs, content, ttFont) + + def getCompositeMaxpValues(self, glyfTable, maxComponentDepth=1): + assert self.isComposite() + nContours = 0 + nPoints = 0 + initialMaxComponentDepth = maxComponentDepth + for compo in self.components: + baseGlyph = glyfTable[compo.glyphName] + if baseGlyph.numberOfContours == 0: + continue + elif baseGlyph.numberOfContours > 0: + nP, nC = baseGlyph.getMaxpValues() + else: + nP, nC, componentDepth = baseGlyph.getCompositeMaxpValues( + glyfTable, initialMaxComponentDepth + 1 + ) + maxComponentDepth = max(maxComponentDepth, componentDepth) + nPoints = nPoints + nP + nContours = 
nContours + nC + return CompositeMaxpValues(nPoints, nContours, maxComponentDepth) + + def getMaxpValues(self): + assert self.numberOfContours > 0 + return len(self.coordinates), len(self.endPtsOfContours) + + def decompileComponents(self, data, glyfTable): + self.components = [] + more = 1 + haveInstructions = 0 + while more: + component = GlyphComponent() + more, haveInstr, data = component.decompile(data, glyfTable) + haveInstructions = haveInstructions | haveInstr + self.components.append(component) + if haveInstructions: + (numInstructions,) = struct.unpack(">h", data[:2]) + data = data[2:] + self.program = ttProgram.Program() + self.program.fromBytecode(data[:numInstructions]) + data = data[numInstructions:] + if len(data) >= 4: + log.warning( + "too much glyph data at the end of composite glyph: %d excess bytes", + len(data), + ) + + def decompileCoordinates(self, data): + endPtsOfContours = array.array("H") + endPtsOfContours.frombytes(data[: 2 * self.numberOfContours]) + if sys.byteorder != "big": + endPtsOfContours.byteswap() + self.endPtsOfContours = endPtsOfContours.tolist() + + pos = 2 * self.numberOfContours + (instructionLength,) = struct.unpack(">h", data[pos : pos + 2]) + self.program = ttProgram.Program() + self.program.fromBytecode(data[pos + 2 : pos + 2 + instructionLength]) + pos += 2 + instructionLength + nCoordinates = self.endPtsOfContours[-1] + 1 + flags, xCoordinates, yCoordinates = self.decompileCoordinatesRaw( + nCoordinates, data, pos + ) + + # fill in repetitions and apply signs + self.coordinates = coordinates = GlyphCoordinates.zeros(nCoordinates) + xIndex = 0 + yIndex = 0 + for i in range(nCoordinates): + flag = flags[i] + # x coordinate + if flag & flagXShort: + if flag & flagXsame: + x = xCoordinates[xIndex] + else: + x = -xCoordinates[xIndex] + xIndex = xIndex + 1 + elif flag & flagXsame: + x = 0 + else: + x = xCoordinates[xIndex] + xIndex = xIndex + 1 + # y coordinate + if flag & flagYShort: + if flag & flagYsame: + y = 
yCoordinates[yIndex] + else: + y = -yCoordinates[yIndex] + yIndex = yIndex + 1 + elif flag & flagYsame: + y = 0 + else: + y = yCoordinates[yIndex] + yIndex = yIndex + 1 + coordinates[i] = (x, y) + assert xIndex == len(xCoordinates) + assert yIndex == len(yCoordinates) + coordinates.relativeToAbsolute() + # discard all flags except "keepFlags" + for i in range(len(flags)): + flags[i] &= keepFlags + self.flags = flags + + def decompileCoordinatesRaw(self, nCoordinates, data, pos=0): + # unpack flags and prepare unpacking of coordinates + flags = bytearray(nCoordinates) + # Warning: deep Python trickery going on. We use the struct module to unpack + # the coordinates. We build a format string based on the flags, so we can + # unpack the coordinates in one struct.unpack() call. + xFormat = ">" # big endian + yFormat = ">" # big endian + j = 0 + while True: + flag = data[pos] + pos += 1 + repeat = 1 + if flag & flagRepeat: + repeat = data[pos] + 1 + pos += 1 + for k in range(repeat): + if flag & flagXShort: + xFormat = xFormat + "B" + elif not (flag & flagXsame): + xFormat = xFormat + "h" + if flag & flagYShort: + yFormat = yFormat + "B" + elif not (flag & flagYsame): + yFormat = yFormat + "h" + flags[j] = flag + j = j + 1 + if j >= nCoordinates: + break + assert j == nCoordinates, "bad glyph flags" + # unpack raw coordinates, krrrrrr-tching! 
        xDataLen = struct.calcsize(xFormat)
        yDataLen = struct.calcsize(yFormat)
        if len(data) - pos - (xDataLen + yDataLen) >= 4:
            # NOTE(review): up to 3 trailing bytes are tolerated here
            # (presumably glyph padding); more than that triggers a warning
            log.warning(
                "too much glyph data: %d excess bytes",
                len(data) - pos - (xDataLen + yDataLen),
            )
        xCoordinates = struct.unpack(xFormat, data[pos : pos + xDataLen])
        yCoordinates = struct.unpack(
            yFormat, data[pos + xDataLen : pos + xDataLen + yDataLen]
        )
        return flags, xCoordinates, yCoordinates

    def compileComponents(self, glyfTable):
        """Compile all components (plus trailing instructions, if any) to bytes."""
        data = b""
        lastcomponent = len(self.components) - 1
        more = 1
        haveInstructions = 0
        for i in range(len(self.components)):
            if i == lastcomponent:
                # only the last component records whether instructions follow
                haveInstructions = hasattr(self, "program")
                more = 0
            compo = self.components[i]
            data = data + compo.compile(more, haveInstructions, glyfTable)
        if haveInstructions:
            instructions = self.program.getBytecode()
            data = data + struct.pack(">h", len(instructions)) + instructions
        return data

    def compileCoordinates(self):
        """Compile a simple glyph's outline data: endPtsOfContours, instruction
        bytecode, then the flag/x/y arrays with deltas packed greedily."""
        assert len(self.coordinates) == len(self.flags)
        data = []
        endPtsOfContours = array.array("H", self.endPtsOfContours)
        if sys.byteorder != "big":
            endPtsOfContours.byteswap()
        data.append(endPtsOfContours.tobytes())
        instructions = self.program.getBytecode()
        data.append(struct.pack(">h", len(instructions)))
        data.append(instructions)

        # convert absolute coordinates to integer deltas before packing
        deltas = self.coordinates.copy()
        deltas.toInt()
        deltas.absoluteToRelative()

        # TODO(behdad): Add a configuration option for this?
        deltas = self.compileDeltasGreedy(self.flags, deltas)
        # deltas = self.compileDeltasOptimal(self.flags, deltas)

        data.extend(deltas)
        return b"".join(data)

    def compileDeltasGreedy(self, flags, deltas):
        # Implements greedy algorithm for packing coordinate deltas:
        # uses shortest representation one coordinate at a time.
        compressedFlags = bytearray()
        compressedXs = bytearray()
        compressedYs = bytearray()
        lastflag = None
        repeat = 0
        for flag, (x, y) in zip(flags, deltas):
            # Oh, the horrors of TrueType
            # do x
            if x == 0:
                # flagXsame alone means: no x byte stored, delta is zero
                flag = flag | flagXsame
            elif -255 <= x <= 255:
                # one-byte magnitude; flagXsame doubles as the positive-sign bit
                flag = flag | flagXShort
                if x > 0:
                    flag = flag | flagXsame
                else:
                    x = -x
                compressedXs.append(x)
            else:
                # full signed 16-bit big-endian delta
                compressedXs.extend(struct.pack(">h", x))
            # do y (mirror of the x logic above)
            if y == 0:
                flag = flag | flagYsame
            elif -255 <= y <= 255:
                flag = flag | flagYShort
                if y > 0:
                    flag = flag | flagYsame
                else:
                    y = -y
                compressedYs.append(y)
            else:
                compressedYs.extend(struct.pack(">h", y))
            # handle repeating flags: collapse runs of identical flag bytes into
            # flag|flagRepeat followed by a count byte (count is capped at 255)
            if flag == lastflag and repeat != 255:
                repeat = repeat + 1
                if repeat == 1:
                    compressedFlags.append(flag)
                else:
                    compressedFlags[-2] = flag | flagRepeat
                    compressedFlags[-1] = repeat
            else:
                repeat = 0
                compressedFlags.append(flag)
            lastflag = flag
        return (compressedFlags, compressedXs, compressedYs)

    def compileDeltasOptimal(self, flags, deltas):
        # Implements optimal, dynamic-programming, algorithm for packing coordinate
        # deltas. The savings are negligible :(.
        candidates = []
        bestTuple = None
        bestCost = 0
        repeat = 0
        for flag, (x, y) in zip(flags, deltas):
            # Oh, the horrors of TrueType
            flag, coordBytes = flagBest(x, y, flag)
            # candidate states for this point: a fresh flag byte, or a fresh
            # repeatable flag byte (one byte more for the repeat count)
            bestCost += 1 + coordBytes
            newCandidates = [
                (bestCost, bestTuple, flag, coordBytes),
                (bestCost + 1, bestTuple, (flag | flagRepeat), coordBytes),
            ]
            for lastCost, lastTuple, lastFlag, coordBytes in candidates:
                # extend a previous repeatable flag if it can also encode this
                # point; the repeat count is kept in the high byte (hence +256
                # and the < 0xFF00 cap at 255 repeats)
                if (
                    lastCost + coordBytes <= bestCost + 1
                    and (lastFlag & flagRepeat)
                    and (lastFlag < 0xFF00)
                    and flagSupports(lastFlag, flag)
                ):
                    if (lastFlag & 0xFF) == (
                        flag | flagRepeat
                    ) and lastCost == bestCost + 1:
                        continue
                    newCandidates.append(
                        (lastCost + coordBytes, lastTuple, lastFlag + 256, coordBytes)
                    )
            candidates = newCandidates
            bestTuple = min(candidates, key=lambda t: t[0])
            bestCost = bestTuple[0]

        # walk the back-pointers to recover the chosen flag sequence
        flags = []
        while bestTuple:
            cost, bestTuple, flag, coordBytes = bestTuple
            flags.append(flag)
        flags.reverse()

        # second pass: emit flag bytes plus x/y delta bytes per chosen flag
        compressedFlags = bytearray()
        compressedXs = bytearray()
        compressedYs = bytearray()
        coords = iter(deltas)
        ff = []
        for flag in flags:
            repeatCount, flag = flag >> 8, flag & 0xFF
            compressedFlags.append(flag)
            if flag & flagRepeat:
                assert repeatCount > 0
                compressedFlags.append(repeatCount)
            else:
                assert repeatCount == 0
            for i in range(1 + repeatCount):
                x, y = next(coords)
                flagEncodeCoords(flag, x, y, compressedXs, compressedYs)
                ff.append(flag)
        # sanity check: every delta must have been consumed exactly once
        try:
            next(coords)
            raise Exception("internal error")
        except StopIteration:
            pass

        return (compressedFlags, compressedXs, compressedYs)

    def recalcBounds(self, glyfTable, *, boundsDone=None):
        """Recalculates the bounds of the glyph.

        Each glyph object stores its bounding box in the
        ``xMin``/``yMin``/``xMax``/``yMax`` attributes. These bounds must be
        recomputed when the ``coordinates`` change. The ``table__g_l_y_f`` bounds
        must be provided to resolve component bounds.
        """
        # fast path for constrained composites; fall back to full flattening
        if self.isComposite() and self.tryRecalcBoundsComposite(
            glyfTable, boundsDone=boundsDone
        ):
            return
        try:
            coords, endPts, flags = self.getCoordinates(glyfTable)
            self.xMin, self.yMin, self.xMax, self.yMax = coords.calcIntBounds()
        except NotImplementedError:
            pass

    def tryRecalcBoundsComposite(self, glyfTable, *, boundsDone=None):
        """Try recalculating the bounds of a composite glyph that has
        certain constrained properties. Namely, none of the components
        have a transform other than an integer translate, and none
        uses the anchor points.

        Each glyph object stores its bounding box in the
        ``xMin``/``yMin``/``xMax``/``yMax`` attributes. These bounds must be
        recomputed when the ``coordinates`` change. The ``table__g_l_y_f`` bounds
        must be provided to resolve component bounds.

        Return True if bounds were calculated, False otherwise.
        """
        for compo in self.components:
            # anchor points (firstPt) or any 2x2 transform disqualify the fast path
            if hasattr(compo, "firstPt") or hasattr(compo, "transform"):
                return False
            if not float(compo.x).is_integer() or not float(compo.y).is_integer():
                return False

        # All components are untransformed and have an integer x/y translate
        bounds = None
        for compo in self.components:
            glyphName = compo.glyphName
            g = glyfTable[glyphName]

            # recurse into the base glyph unless its bounds are known-fresh
            if boundsDone is None or glyphName not in boundsDone:
                g.recalcBounds(glyfTable, boundsDone=boundsDone)
                if boundsDone is not None:
                    boundsDone.add(glyphName)
            # empty components shouldn't update the bounds of the parent glyph
            if g.numberOfContours == 0:
                continue

            x, y = compo.x, compo.y
            bounds = updateBounds(bounds, (g.xMin + x, g.yMin + y))
            bounds = updateBounds(bounds, (g.xMax + x, g.yMax + y))

        if bounds is None:
            bounds = (0, 0, 0, 0)
        self.xMin, self.yMin, self.xMax, self.yMax = bounds
        return True

    def isComposite(self):
        """Test whether a glyph has components"""
        if hasattr(self, "data"):
            # still compacted: peek at numberOfContours in the raw header
            return struct.unpack(">h", self.data[:2])[0] == -1 if self.data else False
        else:
            return self.numberOfContours == -1

    def getCoordinates(self, glyfTable):
        """Return the coordinates, end points and flags

        This method returns three values: A :py:class:`GlyphCoordinates` object,
        a list of the indexes of the final points of each contour (allowing you
        to split up the coordinates list into contours) and a list of flags.

        On simple glyphs, this method returns information from the glyph's own
        contours; on composite glyphs, it "flattens" all components recursively
        to return a list of coordinates representing all the components involved
        in the glyph.

        To interpret the flags for each point, see the "Simple Glyph Flags"
        section of the `glyf table specification <https://docs.microsoft.com/en-us/typography/opentype/spec/glyf#simple-glyph-description>`.
        """

        if self.numberOfContours > 0:
            return self.coordinates, self.endPtsOfContours, self.flags
        elif self.isComposite():
            # it's a composite
            allCoords = GlyphCoordinates()
            allFlags = bytearray()
            allEndPts = []
            for compo in self.components:
                g = glyfTable[compo.glyphName]
                try:
                    coordinates, endPts, flags = g.getCoordinates(glyfTable)
                except RecursionError:
                    raise ttLib.TTLibError(
                        "glyph '%s' contains a recursive component reference"
                        % compo.glyphName
                    )
                # copy so that translating/transforming won't mutate the base glyph
                coordinates = GlyphCoordinates(coordinates)
                if hasattr(compo, "firstPt"):
                    # component uses two reference points: we apply the transform _before_
                    # computing the offset between the points
                    if hasattr(compo, "transform"):
                        coordinates.transform(compo.transform)
                    x1, y1 = allCoords[compo.firstPt]
                    x2, y2 = coordinates[compo.secondPt]
                    move = x1 - x2, y1 - y2
                    coordinates.translate(move)
                else:
                    # component uses XY offsets
                    move = compo.x, compo.y
                    if not hasattr(compo, "transform"):
                        coordinates.translate(move)
                    else:
                        apple_way = compo.flags & SCALED_COMPONENT_OFFSET
                        ms_way = compo.flags & UNSCALED_COMPONENT_OFFSET
                        assert not (apple_way and ms_way)
                        if not (apple_way or ms_way):
                            scale_component_offset = (
                                SCALE_COMPONENT_OFFSET_DEFAULT  # see top of this file
                            )
                        else:
                            scale_component_offset = apple_way
                        if scale_component_offset:
                            # the Apple way: first move, then scale (ie. scale the component offset)
                            coordinates.translate(move)
                            coordinates.transform(compo.transform)
                        else:
                            # the MS way: first scale, then move
                            coordinates.transform(compo.transform)
                            coordinates.translate(move)
                # shift this component's end points past the points collected so far
                offset = len(allCoords)
                allEndPts.extend(e + offset for e in endPts)
                allCoords.extend(coordinates)
                allFlags.extend(flags)
            return allCoords, allEndPts, allFlags
        else:
            # neither simple nor composite: empty glyph
            return GlyphCoordinates(), [], bytearray()

    def getComponentNames(self, glyfTable):
        """Returns a list of names of component glyphs used in this glyph

        This method can be used on simple glyphs (in which case it returns an
        empty list) or composite glyphs.
        """
        if not hasattr(self, "data"):
            if self.isComposite():
                return [c.glyphName for c in self.components]
            else:
                return []

        # Extract components without expanding glyph

        if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0:
            return []  # Not composite

        data = self.data
        i = 10  # skip the 10-byte glyph header
        components = []
        more = 1
        while more:
            flags, glyphID = struct.unpack(">HH", data[i : i + 4])
            i += 4
            flags = int(flags)
            components.append(glyfTable.getGlyphName(int(glyphID)))

            # skip this component's arguments and optional transform
            if flags & ARG_1_AND_2_ARE_WORDS:
                i += 4
            else:
                i += 2
            if flags & WE_HAVE_A_SCALE:
                i += 2
            elif flags & WE_HAVE_AN_X_AND_Y_SCALE:
                i += 4
            elif flags & WE_HAVE_A_TWO_BY_TWO:
                i += 8
            more = flags & MORE_COMPONENTS

        return components

    def trim(self, remove_hinting=False):
        """Remove padding and, if requested, hinting, from a glyph.
+ This works on both expanded and compacted glyphs, without + expanding it.""" + if not hasattr(self, "data"): + if remove_hinting: + if self.isComposite(): + if hasattr(self, "program"): + del self.program + else: + self.program = ttProgram.Program() + self.program.fromBytecode([]) + # No padding to trim. + return + if not self.data: + return + numContours = struct.unpack(">h", self.data[:2])[0] + data = bytearray(self.data) + i = 10 + if numContours >= 0: + i += 2 * numContours # endPtsOfContours + nCoordinates = ((data[i - 2] << 8) | data[i - 1]) + 1 + instructionLen = (data[i] << 8) | data[i + 1] + if remove_hinting: + # Zero instruction length + data[i] = data[i + 1] = 0 + i += 2 + if instructionLen: + # Splice it out + data = data[:i] + data[i + instructionLen :] + instructionLen = 0 + else: + i += 2 + instructionLen + + coordBytes = 0 + j = 0 + while True: + flag = data[i] + i = i + 1 + repeat = 1 + if flag & flagRepeat: + repeat = data[i] + 1 + i = i + 1 + xBytes = yBytes = 0 + if flag & flagXShort: + xBytes = 1 + elif not (flag & flagXsame): + xBytes = 2 + if flag & flagYShort: + yBytes = 1 + elif not (flag & flagYsame): + yBytes = 2 + coordBytes += (xBytes + yBytes) * repeat + j += repeat + if j >= nCoordinates: + break + assert j == nCoordinates, "bad glyph flags" + i += coordBytes + # Remove padding + data = data[:i] + elif self.isComposite(): + more = 1 + we_have_instructions = False + while more: + flags = (data[i] << 8) | data[i + 1] + if remove_hinting: + flags &= ~WE_HAVE_INSTRUCTIONS + if flags & WE_HAVE_INSTRUCTIONS: + we_have_instructions = True + data[i + 0] = flags >> 8 + data[i + 1] = flags & 0xFF + i += 4 + flags = int(flags) + + if flags & ARG_1_AND_2_ARE_WORDS: + i += 4 + else: + i += 2 + if flags & WE_HAVE_A_SCALE: + i += 2 + elif flags & WE_HAVE_AN_X_AND_Y_SCALE: + i += 4 + elif flags & WE_HAVE_A_TWO_BY_TWO: + i += 8 + more = flags & MORE_COMPONENTS + if we_have_instructions: + instructionLen = (data[i] << 8) | data[i + 1] + i += 2 + 
instructionLen + # Remove padding + data = data[:i] + + self.data = data + + def removeHinting(self): + """Removes TrueType hinting instructions from the glyph.""" + self.trim(remove_hinting=True) + + def draw(self, pen, glyfTable, offset=0): + """Draws the glyph using the supplied pen object. + + Arguments: + pen: An object conforming to the pen protocol. + glyfTable: A :py:class:`table__g_l_y_f` object, to resolve components. + offset (int): A horizontal offset. If provided, all coordinates are + translated by this offset. + """ + + if self.isComposite(): + for component in self.components: + glyphName, transform = component.getComponentInfo() + pen.addComponent(glyphName, transform) + return + + coordinates, endPts, flags = self.getCoordinates(glyfTable) + if offset: + coordinates = coordinates.copy() + coordinates.translate((offset, 0)) + start = 0 + maybeInt = lambda v: int(v) if v == int(v) else v + for end in endPts: + end = end + 1 + contour = coordinates[start:end] + cFlags = [flagOnCurve & f for f in flags[start:end]] + cuFlags = [flagCubic & f for f in flags[start:end]] + start = end + if 1 not in cFlags: + assert all(cuFlags) or not any(cuFlags) + cubic = all(cuFlags) + if cubic: + count = len(contour) + assert count % 2 == 0, "Odd number of cubic off-curves undefined" + l = contour[-1] + f = contour[0] + p0 = (maybeInt((l[0] + f[0]) * 0.5), maybeInt((l[1] + f[1]) * 0.5)) + pen.moveTo(p0) + for i in range(0, count, 2): + p1 = contour[i] + p2 = contour[i + 1] + p4 = contour[i + 2 if i + 2 < count else 0] + p3 = ( + maybeInt((p2[0] + p4[0]) * 0.5), + maybeInt((p2[1] + p4[1]) * 0.5), + ) + pen.curveTo(p1, p2, p3) + else: + # There is not a single on-curve point on the curve, + # use pen.qCurveTo's special case by specifying None + # as the on-curve point. + contour.append(None) + pen.qCurveTo(*contour) + else: + # Shuffle the points so that the contour is guaranteed + # to *end* in an on-curve point, which we'll use for + # the moveTo. 
                # Rotate so the contour *ends* on its first on-curve point,
                # which becomes the moveTo anchor.
                firstOnCurve = cFlags.index(1) + 1
                contour = contour[firstOnCurve:] + contour[:firstOnCurve]
                cFlags = cFlags[firstOnCurve:] + cFlags[:firstOnCurve]
                cuFlags = cuFlags[firstOnCurve:] + cuFlags[:firstOnCurve]
                pen.moveTo(contour[-1])
                while contour:
                    # consume one segment up to (and including) the next on-curve point
                    nextOnCurve = cFlags.index(1) + 1
                    if nextOnCurve == 1:
                        # Skip a final lineTo(), as it is implied by
                        # pen.closePath()
                        if len(contour) > 1:
                            pen.lineTo(contour[0])
                    else:
                        cubicFlags = [f for f in cuFlags[: nextOnCurve - 1]]
                        assert all(cubicFlags) or not any(cubicFlags)
                        cubic = any(cubicFlags)
                        if cubic:
                            assert all(
                                cubicFlags
                            ), "Mixed cubic and quadratic segment undefined"

                            count = nextOnCurve
                            assert (
                                count >= 3
                            ), "At least two cubic off-curve points required"
                            assert (
                                count - 1
                            ) % 2 == 0, "Odd number of cubic off-curves undefined"
                            # off-curve pairs with implied on-curves at midpoints
                            for i in range(0, count - 3, 2):
                                p1 = contour[i]
                                p2 = contour[i + 1]
                                p4 = contour[i + 2]
                                p3 = (
                                    maybeInt((p2[0] + p4[0]) * 0.5),
                                    maybeInt((p2[1] + p4[1]) * 0.5),
                                )
                                lastOnCurve = p3
                                pen.curveTo(p1, p2, p3)
                            # final curve ends on the real on-curve point
                            pen.curveTo(*contour[count - 3 : count])
                        else:
                            pen.qCurveTo(*contour[:nextOnCurve])
                    contour = contour[nextOnCurve:]
                    cFlags = cFlags[nextOnCurve:]
                    cuFlags = cuFlags[nextOnCurve:]
                pen.closePath()

    def drawPoints(self, pen, glyfTable, offset=0):
        """Draw the glyph using the supplied pointPen. As opposed to Glyph.draw(),
        this will not change the point indices.
        """

        if self.isComposite():
            for component in self.components:
                glyphName, transform = component.getComponentInfo()
                pen.addComponent(glyphName, transform)
            return

        coordinates, endPts, flags = self.getCoordinates(glyfTable)
        if offset:
            # copy before translating so the glyph's own coordinates stay intact
            coordinates = coordinates.copy()
            coordinates.translate((offset, 0))
        start = 0
        for end in endPts:
            end = end + 1
            contour = coordinates[start:end]
            cFlags = flags[start:end]
            start = end
            pen.beginPath()
            # Start with the appropriate segment type based on the final segment

            if cFlags[-1] & flagOnCurve:
                segmentType = "line"
            elif cFlags[-1] & flagCubic:
                segmentType = "curve"
            else:
                segmentType = "qcurve"
            for i, pt in enumerate(contour):
                if cFlags[i] & flagOnCurve:
                    # on-curve point closes the pending segment
                    pen.addPoint(pt, segmentType=segmentType)
                    segmentType = "line"
                else:
                    pen.addPoint(pt)
                    segmentType = "curve" if cFlags[i] & flagCubic else "qcurve"
            pen.endPath()

    def __eq__(self, other):
        # structural equality: same class and identical attribute dict
        if type(self) != type(other):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        result = self.__eq__(other)
        return result if result is NotImplemented else not result


# Vector.__round__ uses the built-in (Banker's) `round` but we want
# to use otRound below
_roundv = partial(Vector.__round__, round=otRound)


def _is_mid_point(p0: tuple, p1: tuple, p2: tuple) -> bool:
    # True if p1 is in the middle of p0 and p2, either before or after rounding
    p0 = Vector(p0)
    p1 = Vector(p1)
    p2 = Vector(p2)
    return ((p0 + p2) * 0.5).isclose(p1) or _roundv(p0) + _roundv(p2) == _roundv(p1) * 2


def dropImpliedOnCurvePoints(*interpolatable_glyphs: Glyph) -> Set[int]:
    """Drop impliable on-curve points from the (simple) glyph or glyphs.

    In TrueType glyf outlines, on-curve points can be implied when they are located at
    the midpoint of the line connecting two consecutive off-curve points.
+ + If more than one glyphs are passed, these are assumed to be interpolatable masters + of the same glyph impliable, and thus only the on-curve points that are impliable + for all of them will actually be implied. + Composite glyphs or empty glyphs are skipped, only simple glyphs with 1 or more + contours are considered. + The input glyph(s) is/are modified in-place. + + Args: + interpolatable_glyphs: The glyph or glyphs to modify in-place. + + Returns: + The set of point indices that were dropped if any. + + Raises: + ValueError if simple glyphs are not in fact interpolatable because they have + different point flags or number of contours. + + Reference: + https://developer.apple.com/fonts/TrueType-Reference-Manual/RM01/Chap1.html + """ + staticAttributes = SimpleNamespace( + numberOfContours=None, flags=None, endPtsOfContours=None + ) + drop = None + simple_glyphs = [] + for i, glyph in enumerate(interpolatable_glyphs): + if glyph.numberOfContours < 1: + # ignore composite or empty glyphs + continue + + for attr in staticAttributes.__dict__: + expected = getattr(staticAttributes, attr) + found = getattr(glyph, attr) + if expected is None: + setattr(staticAttributes, attr, found) + elif expected != found: + raise ValueError( + f"Incompatible {attr} for glyph at master index {i}: " + f"expected {expected}, found {found}" + ) + + may_drop = set() + start = 0 + coords = glyph.coordinates + flags = staticAttributes.flags + endPtsOfContours = staticAttributes.endPtsOfContours + for last in endPtsOfContours: + for i in range(start, last + 1): + if not (flags[i] & flagOnCurve): + continue + prv = i - 1 if i > start else last + nxt = i + 1 if i < last else start + if (flags[prv] & flagOnCurve) or flags[prv] != flags[nxt]: + continue + # we may drop the ith on-curve if halfway between previous/next off-curves + if not _is_mid_point(coords[prv], coords[i], coords[nxt]): + continue + + may_drop.add(i) + start = last + 1 + # we only want to drop if ALL interpolatable glyphs 
        if drop is None:
            # first simple glyph seen: start from its candidate set
            drop = may_drop
        else:
            # only points impliable in EVERY master may be dropped
            drop.intersection_update(may_drop)

        simple_glyphs.append(glyph)

    if drop:
        # Do the actual dropping
        flags = staticAttributes.flags
        assert flags is not None
        newFlags = array.array(
            "B", (flags[i] for i in range(len(flags)) if i not in drop)
        )

        endPts = staticAttributes.endPtsOfContours
        assert endPts is not None
        newEndPts = []
        i = 0
        delta = 0
        # each dropped point shifts every later contour-end index down by one
        for d in sorted(drop):
            while d > endPts[i]:
                newEndPts.append(endPts[i] - delta)
                i += 1
            delta += 1
        while i < len(endPts):
            newEndPts.append(endPts[i] - delta)
            i += 1

        for glyph in simple_glyphs:
            coords = glyph.coordinates
            glyph.coordinates = GlyphCoordinates(
                coords[i] for i in range(len(coords)) if i not in drop
            )
            glyph.flags = newFlags
            glyph.endPtsOfContours = newEndPts

    return drop if drop is not None else set()


class GlyphComponent(object):
    """Represents a component within a composite glyph.

    The component is represented internally with four attributes: ``glyphName``,
    ``x``, ``y`` and ``transform``. If there is no "two-by-two" matrix (i.e
    no scaling, reflection, or rotation; only translation), the ``transform``
    attribute is not present.
    """

    # The above documentation is not *completely* true, but is *true enough* because
    # the rare firstPt/lastPt attributes are not totally supported and nobody seems to
    # mind - see below.

    def __init__(self):
        pass

    def getComponentInfo(self):
        """Return information about the component

        This method returns a tuple of two values: the glyph name of the component's
        base glyph, and a transformation matrix. As opposed to accessing the attributes
        directly, ``getComponentInfo`` always returns a six-element tuple of the
        component's transformation matrix, even when the two-by-two ``.transform``
        matrix is not present.
        """
        # XXX Ignoring self.firstPt & self.lastpt for now: I need to implement
        # something equivalent in fontTools.objects.glyph (I'd rather not
        # convert it to an absolute offset, since it is valuable information).
        # This method will now raise "AttributeError: x" on glyphs that use
        # this TT feature.
        if hasattr(self, "transform"):
            [[xx, xy], [yx, yy]] = self.transform
            trans = (xx, xy, yx, yy, self.x, self.y)
        else:
            # identity 2x2: translation-only component
            trans = (1, 0, 0, 1, self.x, self.y)
        return self.glyphName, trans

    def decompile(self, data, glyfTable):
        # Parse one component record off the front of `data`.
        # Returns (more, haveInstructions, rest): `more` is truthy while
        # further component records follow; `haveInstructions` signals that
        # TrueType instructions trail the last component.
        flags, glyphID = struct.unpack(">HH", data[:4])
        self.flags = int(flags)
        glyphID = int(glyphID)
        self.glyphName = glyfTable.getGlyphName(int(glyphID))
        data = data[4:]

        if self.flags & ARG_1_AND_2_ARE_WORDS:
            if self.flags & ARGS_ARE_XY_VALUES:
                # signed 16-bit x/y offsets
                self.x, self.y = struct.unpack(">hh", data[:4])
            else:
                # point-matching variant: unsigned point indices
                x, y = struct.unpack(">HH", data[:4])
                self.firstPt, self.secondPt = int(x), int(y)
            data = data[4:]
        else:
            if self.flags & ARGS_ARE_XY_VALUES:
                # signed 8-bit x/y offsets
                self.x, self.y = struct.unpack(">bb", data[:2])
            else:
                x, y = struct.unpack(">BB", data[:2])
                self.firstPt, self.secondPt = int(x), int(y)
            data = data[2:]

        # at most one of the three scale forms is present
        if self.flags & WE_HAVE_A_SCALE:
            (scale,) = struct.unpack(">h", data[:2])
            self.transform = [
                [fi2fl(scale, 14), 0],
                [0, fi2fl(scale, 14)],
            ]  # fixed 2.14
            data = data[2:]
        elif self.flags & WE_HAVE_AN_X_AND_Y_SCALE:
            xscale, yscale = struct.unpack(">hh", data[:4])
            self.transform = [
                [fi2fl(xscale, 14), 0],
                [0, fi2fl(yscale, 14)],
            ]  # fixed 2.14
            data = data[4:]
        elif self.flags & WE_HAVE_A_TWO_BY_TWO:
            (xscale, scale01, scale10, yscale) = struct.unpack(">hhhh", data[:8])
            self.transform = [
                [fi2fl(xscale, 14), fi2fl(scale01, 14)],
                [fi2fl(scale10, 14), fi2fl(yscale, 14)],
            ]  # fixed 2.14
            data = data[8:]
        more = self.flags & MORE_COMPONENTS
        haveInstructions = self.flags & WE_HAVE_INSTRUCTIONS
        # keep only the flags with standalone meaning; the argument-width and
        # scale-format bits are recomputed on compile
        self.flags = self.flags & (
            ROUND_XY_TO_GRID
            | USE_MY_METRICS
            | SCALED_COMPONENT_OFFSET
            | UNSCALED_COMPONENT_OFFSET
            | NON_OVERLAPPING
            | OVERLAP_COMPOUND
        )
        return more, haveInstructions, data

    def compile(self, more, haveInstructions, glyfTable):
        data = b""

        # reset all flags we will calculate ourselves
        flags = self.flags & (
            ROUND_XY_TO_GRID
            | USE_MY_METRICS
            | SCALED_COMPONENT_OFFSET
            | UNSCALED_COMPONENT_OFFSET
            | NON_OVERLAPPING
            | OVERLAP_COMPOUND
        )
        if more:
            flags = flags | MORE_COMPONENTS
        if haveInstructions:
            flags = flags | WE_HAVE_INSTRUCTIONS

        if hasattr(self, "firstPt"):
            # point-matching component: pick the narrowest width that fits
            if (0 <= self.firstPt <= 255) and (0 <= self.secondPt <= 255):
                data = data + struct.pack(">BB", self.firstPt, self.secondPt)
            else:
                data = data + struct.pack(">HH", self.firstPt, self.secondPt)
                flags = flags | ARG_1_AND_2_ARE_WORDS
        else:
            x = otRound(self.x)
            y = otRound(self.y)
            flags = flags | ARGS_ARE_XY_VALUES
            if (-128 <= x <= 127) and (-128 <= y <= 127):
                data = data + struct.pack(">bb", x, y)
            else:
                data = data + struct.pack(">hh", x, y)
                flags = flags | ARG_1_AND_2_ARE_WORDS

        if hasattr(self, "transform"):
            # serialize the 2x2 matrix in the most compact of the three forms
            transform = [[fl2fi(x, 14) for x in row] for row in self.transform]
            if transform[0][1] or transform[1][0]:
                flags = flags | WE_HAVE_A_TWO_BY_TWO
                data = data + struct.pack(
                    ">hhhh",
                    transform[0][0],
                    transform[0][1],
                    transform[1][0],
                    transform[1][1],
                )
            elif transform[0][0] != transform[1][1]:
                flags = flags | WE_HAVE_AN_X_AND_Y_SCALE
                data = data + struct.pack(">hh", transform[0][0], transform[1][1])
            else:
                flags = flags | WE_HAVE_A_SCALE
                data = data + struct.pack(">h", transform[0][0])

        glyphID = glyfTable.getGlyphID(self.glyphName)
        return struct.pack(">HH", flags, glyphID) + data

    def toXML(self, writer, ttFont):
        # mirror of fromXML: emit x/y OR firstPt/secondPt, then the scale
        # attributes matching whichever 2x2 form the matrix degenerates to
        attrs = [("glyphName", self.glyphName)]
        if not hasattr(self, "firstPt"):
            attrs = attrs + [("x", self.x), ("y", self.y)]
        else:
            attrs = attrs + [("firstPt", self.firstPt), ("secondPt", self.secondPt)]

        if hasattr(self, "transform"):
            transform = self.transform
            if transform[0][1] or transform[1][0]:
                attrs = attrs + [
                    ("scalex", fl2str(transform[0][0], 14)),
                    ("scale01", fl2str(transform[0][1], 14)),
                    ("scale10", fl2str(transform[1][0], 14)),
                    ("scaley", fl2str(transform[1][1], 14)),
                ]
            elif transform[0][0] != transform[1][1]:
                attrs = attrs + [
                    ("scalex", fl2str(transform[0][0], 14)),
                    ("scaley", fl2str(transform[1][1], 14)),
                ]
            else:
                attrs = attrs + [("scale", fl2str(transform[0][0], 14))]
        attrs = attrs + [("flags", hex(self.flags))]
        writer.simpletag("component", attrs)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.glyphName = attrs["glyphName"]
        if "firstPt" in attrs:
            self.firstPt = safeEval(attrs["firstPt"])
            self.secondPt = safeEval(attrs["secondPt"])
        else:
            self.x = safeEval(attrs["x"])
            self.y = safeEval(attrs["y"])
        # scale01 implies the full 2x2; scalex alone implies x/y scale;
        # scale alone implies a uniform scale
        if "scale01" in attrs:
            scalex = str2fl(attrs["scalex"], 14)
            scale01 = str2fl(attrs["scale01"], 14)
            scale10 = str2fl(attrs["scale10"], 14)
            scaley = str2fl(attrs["scaley"], 14)
            self.transform = [[scalex, scale01], [scale10, scaley]]
        elif "scalex" in attrs:
            scalex = str2fl(attrs["scalex"], 14)
            scaley = str2fl(attrs["scaley"], 14)
            self.transform = [[scalex, 0], [0, scaley]]
        elif "scale" in attrs:
            scale = str2fl(attrs["scale"], 14)
            self.transform = [[scale, 0], [0, scale]]
        self.flags = safeEval(attrs["flags"])

    def __eq__(self, other):
        if type(self) != type(other):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        result = self.__eq__(other)
        return result if result is NotImplemented else not result


class GlyphCoordinates(object):
    """A list of glyph coordinates.

    Unlike an ordinary list, this is a numpy-like matrix object which supports
    matrix addition, scalar multiplication and other operations described below.
+ """ + + def __init__(self, iterable=[]): + self._a = array.array("d") + self.extend(iterable) + + @property + def array(self): + """Returns the underlying array of coordinates""" + return self._a + + @staticmethod + def zeros(count): + """Creates a new ``GlyphCoordinates`` object with all coordinates set to (0,0)""" + g = GlyphCoordinates() + g._a.frombytes(bytes(count * 2 * g._a.itemsize)) + return g + + def copy(self): + """Creates a new ``GlyphCoordinates`` object which is a copy of the current one.""" + c = GlyphCoordinates() + c._a.extend(self._a) + return c + + def __len__(self): + """Returns the number of coordinates in the array.""" + return len(self._a) // 2 + + def __getitem__(self, k): + """Returns a two element tuple (x,y)""" + a = self._a + if isinstance(k, slice): + indices = range(*k.indices(len(self))) + # Instead of calling ourselves recursively, duplicate code; faster + ret = [] + for k in indices: + x = a[2 * k] + y = a[2 * k + 1] + ret.append( + (int(x) if x.is_integer() else x, int(y) if y.is_integer() else y) + ) + return ret + x = a[2 * k] + y = a[2 * k + 1] + return (int(x) if x.is_integer() else x, int(y) if y.is_integer() else y) + + def __setitem__(self, k, v): + """Sets a point's coordinates to a two element tuple (x,y)""" + if isinstance(k, slice): + indices = range(*k.indices(len(self))) + # XXX This only works if len(v) == len(indices) + for j, i in enumerate(indices): + self[i] = v[j] + return + self._a[2 * k], self._a[2 * k + 1] = v + + def __delitem__(self, i): + """Removes a point from the list""" + i = (2 * i) % len(self._a) + del self._a[i] + del self._a[i] + + def __repr__(self): + return "GlyphCoordinates([" + ",".join(str(c) for c in self) + "])" + + def append(self, p): + self._a.extend(tuple(p)) + + def extend(self, iterable): + for p in iterable: + self._a.extend(p) + + def toInt(self, *, round=otRound): + if round is noRound: + return + a = self._a + for i in range(len(a)): + a[i] = round(a[i]) + + def calcBounds(self): 
+ a = self._a + if not a: + return 0, 0, 0, 0 + xs = a[0::2] + ys = a[1::2] + return min(xs), min(ys), max(xs), max(ys) + + def calcIntBounds(self, round=otRound): + return tuple(round(v) for v in self.calcBounds()) + + def relativeToAbsolute(self): + a = self._a + x, y = 0, 0 + for i in range(0, len(a), 2): + a[i] = x = a[i] + x + a[i + 1] = y = a[i + 1] + y + + def absoluteToRelative(self): + a = self._a + x, y = 0, 0 + for i in range(0, len(a), 2): + nx = a[i] + ny = a[i + 1] + a[i] = nx - x + a[i + 1] = ny - y + x = nx + y = ny + + def translate(self, p): + """ + >>> GlyphCoordinates([(1,2)]).translate((.5,0)) + """ + x, y = p + if x == 0 and y == 0: + return + a = self._a + for i in range(0, len(a), 2): + a[i] += x + a[i + 1] += y + + def scale(self, p): + """ + >>> GlyphCoordinates([(1,2)]).scale((.5,0)) + """ + x, y = p + if x == 1 and y == 1: + return + a = self._a + for i in range(0, len(a), 2): + a[i] *= x + a[i + 1] *= y + + def transform(self, t): + """ + >>> GlyphCoordinates([(1,2)]).transform(((.5,0),(.2,.5))) + """ + a = self._a + for i in range(0, len(a), 2): + x = a[i] + y = a[i + 1] + px = x * t[0][0] + y * t[1][0] + py = x * t[0][1] + y * t[1][1] + a[i] = px + a[i + 1] = py + + def __eq__(self, other): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g2 = GlyphCoordinates([(1.0,2)]) + >>> g3 = GlyphCoordinates([(1.5,2)]) + >>> g == g2 + True + >>> g == g3 + False + >>> g2 == g3 + False + """ + if type(self) != type(other): + return NotImplemented + return self._a == other._a + + def __ne__(self, other): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g2 = GlyphCoordinates([(1.0,2)]) + >>> g3 = GlyphCoordinates([(1.5,2)]) + >>> g != g2 + False + >>> g != g3 + True + >>> g2 != g3 + True + """ + result = self.__eq__(other) + return result if result is NotImplemented else not result + + # Math operations + + def __pos__(self): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g + GlyphCoordinates([(1, 2)]) + >>> g2 = +g + >>> g2 + GlyphCoordinates([(1, 
2)]) + >>> g2.translate((1,0)) + >>> g2 + GlyphCoordinates([(2, 2)]) + >>> g + GlyphCoordinates([(1, 2)]) + """ + return self.copy() + + def __neg__(self): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g + GlyphCoordinates([(1, 2)]) + >>> g2 = -g + >>> g2 + GlyphCoordinates([(-1, -2)]) + >>> g + GlyphCoordinates([(1, 2)]) + """ + r = self.copy() + a = r._a + for i in range(len(a)): + a[i] = -a[i] + return r + + def __round__(self, *, round=otRound): + r = self.copy() + r.toInt(round=round) + return r + + def __add__(self, other): + return self.copy().__iadd__(other) + + def __sub__(self, other): + return self.copy().__isub__(other) + + def __mul__(self, other): + return self.copy().__imul__(other) + + def __truediv__(self, other): + return self.copy().__itruediv__(other) + + __radd__ = __add__ + __rmul__ = __mul__ + + def __rsub__(self, other): + return other + (-self) + + def __iadd__(self, other): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g += (.5,0) + >>> g + GlyphCoordinates([(1.5, 2)]) + >>> g2 = GlyphCoordinates([(3,4)]) + >>> g += g2 + >>> g + GlyphCoordinates([(4.5, 6)]) + """ + if isinstance(other, tuple): + assert len(other) == 2 + self.translate(other) + return self + if isinstance(other, GlyphCoordinates): + other = other._a + a = self._a + assert len(a) == len(other) + for i in range(len(a)): + a[i] += other[i] + return self + return NotImplemented + + def __isub__(self, other): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g -= (.5,0) + >>> g + GlyphCoordinates([(0.5, 2)]) + >>> g2 = GlyphCoordinates([(3,4)]) + >>> g -= g2 + >>> g + GlyphCoordinates([(-2.5, -2)]) + """ + if isinstance(other, tuple): + assert len(other) == 2 + self.translate((-other[0], -other[1])) + return self + if isinstance(other, GlyphCoordinates): + other = other._a + a = self._a + assert len(a) == len(other) + for i in range(len(a)): + a[i] -= other[i] + return self + return NotImplemented + + def __imul__(self, other): + """ + >>> g = GlyphCoordinates([(1,2)]) + >>> g 
*= (2,.5) + >>> g *= 2 + >>> g + GlyphCoordinates([(4, 2)]) + >>> g = GlyphCoordinates([(1,2)]) + >>> g *= 2 + >>> g + GlyphCoordinates([(2, 4)]) + """ + if isinstance(other, tuple): + assert len(other) == 2 + self.scale(other) + return self + if isinstance(other, Number): + if other == 1: + return self + a = self._a + for i in range(len(a)): + a[i] *= other + return self + return NotImplemented + + def __itruediv__(self, other): + """ + >>> g = GlyphCoordinates([(1,3)]) + >>> g /= (.5,1.5) + >>> g /= 2 + >>> g + GlyphCoordinates([(1, 1)]) + """ + if isinstance(other, Number): + other = (other, other) + if isinstance(other, tuple): + if other == (1, 1): + return self + assert len(other) == 2 + self.scale((1.0 / other[0], 1.0 / other[1])) + return self + return NotImplemented + + def __bool__(self): + """ + >>> g = GlyphCoordinates([]) + >>> bool(g) + False + >>> g = GlyphCoordinates([(0,0), (0.,0)]) + >>> bool(g) + True + >>> g = GlyphCoordinates([(0,0), (1,0)]) + >>> bool(g) + True + >>> g = GlyphCoordinates([(0,.5), (0,0)]) + >>> bool(g) + True + """ + return bool(self._a) + + __nonzero__ = __bool__ + + +if __name__ == "__main__": + import doctest, sys + + sys.exit(doctest.testmod().failed) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_h_m_t_x.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_h_m_t_x.py new file mode 100644 index 0000000000000000000000000000000000000000..2dbdd7f9850a58bfcc9ff073860d394e0467b945 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_h_m_t_x.py @@ -0,0 +1,151 @@ +from fontTools.misc.roundTools import otRound +from fontTools import ttLib +from fontTools.misc.textTools import safeEval +from . 
 import DefaultTable
import sys
import struct
import array
import logging


log = logging.getLogger(__name__)


class table__h_m_t_x(DefaultTable.DefaultTable):
    """Horizontal metrics: one (advance width, left side bearing) pair per glyph,
    exposed as the dict ``self.metrics`` keyed by glyph name.
    """

    # These class attributes parameterize the (de)compile logic so a vertical
    # metrics subclass can reuse it by overriding them.
    headerTag = "hhea"
    advanceName = "width"
    sideBearingName = "lsb"
    numberOfMetricsName = "numberOfHMetrics"
    longMetricFormat = "Hh"  # per-glyph record: uint16 advance, int16 side bearing

    def decompile(self, data, ttFont):
        """Parse the binary table into ``self.metrics``: {name: (advance, sb)}."""
        numGlyphs = ttFont["maxp"].numGlyphs
        headerTable = ttFont.get(self.headerTag)
        if headerTable is not None:
            numberOfMetrics = int(getattr(headerTable, self.numberOfMetricsName))
        else:
            # no hhea/vhea: assume a full advance per glyph
            numberOfMetrics = numGlyphs
        if numberOfMetrics > numGlyphs:
            log.warning(
                "The %s.%s exceeds the maxp.numGlyphs"
                % (self.headerTag, self.numberOfMetricsName)
            )
            numberOfMetrics = numGlyphs
        if len(data) < 4 * numberOfMetrics:
            raise ttLib.TTLibError("not enough '%s' table data" % self.tableTag)
        # Note: advanceWidth is unsigned, but some font editors might
        # read/write as signed. We can't be sure whether it was a mistake
        # or not, so we read as unsigned but also issue a warning...
        metricsFmt = ">" + self.longMetricFormat * numberOfMetrics
        metrics = struct.unpack(metricsFmt, data[: 4 * numberOfMetrics])
        data = data[4 * numberOfMetrics :]
        numberOfSideBearings = numGlyphs - numberOfMetrics
        sideBearings = array.array("h", data[: 2 * numberOfSideBearings])
        data = data[2 * numberOfSideBearings :]

        if sys.byteorder != "big":
            sideBearings.byteswap()
        if data:
            log.warning("too much '%s' table data" % self.tableTag)
        self.metrics = {}
        glyphOrder = ttFont.getGlyphOrder()
        for i in range(numberOfMetrics):
            glyphName = glyphOrder[i]
            advanceWidth, lsb = metrics[i * 2 : i * 2 + 2]
            if advanceWidth > 32767:
                log.warning(
                    "Glyph %r has a huge advance %s (%d); is it intentional or "
                    "an (invalid) negative value?",
                    glyphName,
                    self.advanceName,
                    advanceWidth,
                )
            self.metrics[glyphName] = (advanceWidth, lsb)
        # trailing glyphs share the last explicit advance; only their side
        # bearings are stored
        lastAdvance = metrics[-2]
        # NOTE(review): numberOfMetrics == 0 with numGlyphs > 0 would raise
        # IndexError here — presumably impossible in valid fonts; confirm
        # before hardening.
        for i in range(numberOfSideBearings):
            glyphName = glyphOrder[i + numberOfMetrics]
            self.metrics[glyphName] = (lastAdvance, sideBearings[i])

    def compile(self, ttFont):
        """Serialize ``self.metrics`` back to binary, trimming the trailing
        run of equal advances into side-bearing-only records and updating
        the hhea/vhea metric count accordingly."""
        metrics = []
        hasNegativeAdvances = False
        for glyphName in ttFont.getGlyphOrder():
            advanceWidth, sideBearing = self.metrics[glyphName]
            if advanceWidth < 0:
                log.error(
                    "Glyph %r has negative advance %s" % (glyphName, self.advanceName)
                )
                hasNegativeAdvances = True
            metrics.append([advanceWidth, sideBearing])

        headerTable = ttFont.get(self.headerTag)
        if headerTable is not None:
            # walk backwards past every trailing record whose advance equals
            # the last one; at least one full record must remain
            lastAdvance = metrics[-1][0]
            lastIndex = len(metrics)
            while metrics[lastIndex - 2][0] == lastAdvance:
                lastIndex -= 1
                if lastIndex <= 1:
                    # all advances are equal
                    lastIndex = 1
                    break
            additionalMetrics = metrics[lastIndex:]
            additionalMetrics = [otRound(sb) for _, sb in additionalMetrics]
            metrics = metrics[:lastIndex]
            numberOfMetrics = len(metrics)
            setattr(headerTable, self.numberOfMetricsName, numberOfMetrics)
        else:
            # no hhea/vhea, can't store numberOfMetrics; assume == numGlyphs
            numberOfMetrics = ttFont["maxp"].numGlyphs
            additionalMetrics = []

        allMetrics = []
        for advance, sb in metrics:
            allMetrics.extend([otRound(advance), otRound(sb)])
        metricsFmt = ">" + self.longMetricFormat * numberOfMetrics
        try:
            data = struct.pack(metricsFmt, *allMetrics)
        except struct.error as e:
            # turn the cryptic struct error into a domain-specific one when
            # the cause is a negative advance
            if "out of range" in str(e) and hasNegativeAdvances:
                raise ttLib.TTLibError(
                    "'%s' table can't contain negative advance %ss"
                    % (self.tableTag, self.advanceName)
                )
            else:
                raise
        additionalMetrics = array.array("h", additionalMetrics)
        if sys.byteorder != "big":
            additionalMetrics.byteswap()
        data = data + additionalMetrics.tobytes()
        return data

    def toXML(self, writer, ttFont):
        """Write one <mtx> element per glyph, sorted by glyph name."""
        names = sorted(self.metrics.keys())
        for glyphName in names:
            advance, sb = self.metrics[glyphName]
            writer.simpletag(
                "mtx",
                [
                    ("name", glyphName),
                    (self.advanceName, advance),
                    (self.sideBearingName, sb),
                ],
            )
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Populate ``self.metrics`` from <mtx> elements."""
        if not hasattr(self, "metrics"):
            self.metrics = {}
        if name == "mtx":
            self.metrics[attrs["name"]] = (
                safeEval(attrs[self.advanceName]),
                safeEval(attrs[self.sideBearingName]),
            )

    def __delitem__(self, glyphName):
        del self.metrics[glyphName]

    def __getitem__(self, glyphName):
        return self.metrics[glyphName]

    def __setitem__(self, glyphName, advance_sb_pair):
        self.metrics[glyphName] = tuple(advance_sb_pair)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_l_o_c_a.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_l_o_c_a.py
new file mode 100644
index 0000000000000000000000000000000000000000..39c0c9e39ba9d60367f5f7072f072a7fba1cd656
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_l_o_c_a.py
@@ -0,0 +1,62 @@
from .
import DefaultTable +import sys +import array +import logging + + +log = logging.getLogger(__name__) + + +class table__l_o_c_a(DefaultTable.DefaultTable): + dependencies = ["glyf"] + + def decompile(self, data, ttFont): + longFormat = ttFont["head"].indexToLocFormat + if longFormat: + format = "I" + else: + format = "H" + locations = array.array(format) + locations.frombytes(data) + if sys.byteorder != "big": + locations.byteswap() + if not longFormat: + locations = array.array("I", (2 * l for l in locations)) + if len(locations) < (ttFont["maxp"].numGlyphs + 1): + log.warning( + "corrupt 'loca' table, or wrong numGlyphs in 'maxp': %d %d", + len(locations) - 1, + ttFont["maxp"].numGlyphs, + ) + self.locations = locations + + def compile(self, ttFont): + try: + max_location = max(self.locations) + except AttributeError: + self.set([]) + max_location = 0 + if max_location < 0x20000 and all(l % 2 == 0 for l in self.locations): + locations = array.array("H") + for i in range(len(self.locations)): + locations.append(self.locations[i] // 2) + ttFont["head"].indexToLocFormat = 0 + else: + locations = array.array("I", self.locations) + ttFont["head"].indexToLocFormat = 1 + if sys.byteorder != "big": + locations.byteswap() + return locations.tobytes() + + def set(self, locations): + self.locations = array.array("I", locations) + + def toXML(self, writer, ttFont): + writer.comment("The 'loca' table will be calculated by the compiler") + writer.newline() + + def __getitem__(self, index): + return self.locations[index] + + def __len__(self): + return len(self.locations) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_l_t_a_g.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_l_t_a_g.py new file mode 100644 index 0000000000000000000000000000000000000000..24f5e131f0c615dcf86b0494854d9a3a5a1284f2 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_l_t_a_g.py @@ -0,0 +1,64 @@ +from 
fontTools.misc.textTools import bytesjoin, tobytes, safeEval +from . import DefaultTable +import struct + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ltag.html + + +class table__l_t_a_g(DefaultTable.DefaultTable): + def __init__(self, tag=None): + DefaultTable.DefaultTable.__init__(self, tag) + self.version, self.flags = 1, 0 + self.tags = [] + + def addTag(self, tag): + """Add 'tag' to the list of langauge tags if not already there. + + Returns the integer index of 'tag' in the list of all tags. + """ + try: + return self.tags.index(tag) + except ValueError: + self.tags.append(tag) + return len(self.tags) - 1 + + def decompile(self, data, ttFont): + self.version, self.flags, numTags = struct.unpack(">LLL", data[:12]) + assert self.version == 1 + self.tags = [] + for i in range(numTags): + pos = 12 + i * 4 + offset, length = struct.unpack(">HH", data[pos : pos + 4]) + tag = data[offset : offset + length].decode("ascii") + self.tags.append(tag) + + def compile(self, ttFont): + dataList = [struct.pack(">LLL", self.version, self.flags, len(self.tags))] + stringPool = "" + for tag in self.tags: + offset = stringPool.find(tag) + if offset < 0: + offset = len(stringPool) + stringPool = stringPool + tag + offset = offset + 12 + len(self.tags) * 4 + dataList.append(struct.pack(">HH", offset, len(tag))) + dataList.append(tobytes(stringPool)) + return bytesjoin(dataList) + + def toXML(self, writer, ttFont): + writer.simpletag("version", value=self.version) + writer.newline() + writer.simpletag("flags", value=self.flags) + writer.newline() + for tag in self.tags: + writer.simpletag("LanguageTag", tag=tag) + writer.newline() + + def fromXML(self, name, attrs, content, ttFont): + if not hasattr(self, "tags"): + self.tags = [] + if name == "LanguageTag": + self.tags.append(attrs["tag"]) + elif "value" in attrs: + value = safeEval(attrs["value"]) + setattr(self, name, value) diff --git 
a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_p_o_s_t.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_p_o_s_t.py
new file mode 100644
index 0000000000000000000000000000000000000000..dba637117a0ac148af65c75853dd3bffbbbd1154
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_p_o_s_t.py
@@ -0,0 +1,308 @@
from fontTools import ttLib
from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder
from fontTools.misc import sstruct
from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval, readHex
from . import DefaultTable
import sys
import struct
import array
import logging

log = logging.getLogger(__name__)

postFormat = """
	>
	formatType:			16.16F
	italicAngle:		16.16F		# italic angle in degrees
	underlinePosition:	h
	underlineThickness:	h
	isFixedPitch:		L
	minMemType42:		L			# minimum memory if TrueType font is downloaded
	maxMemType42:		L			# maximum memory if TrueType font is downloaded
	minMemType1:		L			# minimum memory if Type1 font is downloaded
	maxMemType1:		L			# maximum memory if Type1 font is downloaded
"""

postFormatSize = sstruct.calcsize(postFormat)


class table__p_o_s_t(DefaultTable.DefaultTable):
    """PostScript ('post') table: glyph names plus global PostScript metrics.

    Dispatches on formatType (1.0, 2.0, 3.0 or 4.0) for both reading and
    writing; only those four formats are supported.
    """

    def decompile(self, data, ttFont):
        """Unpack the fixed header, then delegate to the per-format decoder."""
        sstruct.unpack(postFormat, data[:postFormatSize], self)
        data = data[postFormatSize:]
        if self.formatType == 1.0:
            self.decode_format_1_0(data, ttFont)
        elif self.formatType == 2.0:
            self.decode_format_2_0(data, ttFont)
        elif self.formatType == 3.0:
            self.decode_format_3_0(data, ttFont)
        elif self.formatType == 4.0:
            self.decode_format_4_0(data, ttFont)
        else:
            # unsupported format
            raise ttLib.TTLibError(
                "'post' table format %f not supported" % self.formatType
            )

    def compile(self, ttFont):
        """Pack the fixed header, then append the per-format payload."""
        data = sstruct.pack(postFormat, self)
        if self.formatType == 1.0:
            pass  # we're done
        elif self.formatType == 2.0:
            data = data + self.encode_format_2_0(ttFont)
        elif self.formatType == 3.0:
            pass  # we're done
        elif self.formatType == 4.0:
            data = data + self.encode_format_4_0(ttFont)
        else:
            # unsupported format
            raise ttLib.TTLibError(
                "'post' table format %f not supported" % self.formatType
            )
        return data

    def getGlyphOrder(self):
        """This function will get called by a ttLib.TTFont instance.
        Do not call this function yourself, use TTFont().getGlyphOrder()
        or its relatives instead!
        """
        if not hasattr(self, "glyphOrder"):
            raise ttLib.TTLibError("illegal use of getGlyphOrder()")
        glyphOrder = self.glyphOrder
        # hand ownership of the list to the caller; a second call raises
        del self.glyphOrder
        return glyphOrder

    def decode_format_1_0(self, data, ttFont):
        # format 1.0 simply uses the standard Macintosh glyph order
        self.glyphOrder = standardGlyphOrder[: ttFont["maxp"].numGlyphs]

    def decode_format_2_0(self, data, ttFont):
        """Format 2.0: per-glyph indices into the standard order (<= 257)
        or into a table of extra Pascal strings (index - 258)."""
        (numGlyphs,) = struct.unpack(">H", data[:2])
        numGlyphs = int(numGlyphs)
        if numGlyphs > ttFont["maxp"].numGlyphs:
            # Assume the numGlyphs field is bogus, so sync with maxp.
            # I've seen this in one font, and if the assumption is
            # wrong elsewhere, well, so be it: it's hard enough to
            # work around _one_ non-conforming post format...
            numGlyphs = ttFont["maxp"].numGlyphs
        data = data[2:]
        indices = array.array("H")
        indices.frombytes(data[: 2 * numGlyphs])
        if sys.byteorder != "big":
            indices.byteswap()
        data = data[2 * numGlyphs :]
        maxIndex = max(indices)
        # maxIndex - 257 == number of extra (non-standard) names present
        self.extraNames = extraNames = unpackPStrings(data, maxIndex - 257)
        self.glyphOrder = glyphOrder = [""] * int(ttFont["maxp"].numGlyphs)
        for glyphID in range(numGlyphs):
            index = indices[glyphID]
            if index > 257:
                try:
                    name = extraNames[index - 258]
                except IndexError:
                    name = ""
            else:
                # fetch names from standard list
                name = standardGlyphOrder[index]
            glyphOrder[glyphID] = name
        self.build_psNameMapping(ttFont)

    def build_psNameMapping(self, ttFont):
        """Deduplicate glyph names in-place (empty -> glyphNNNNN, clashes ->
        name#N) and record the made-up-name -> original-ps-name mapping."""
        mapping = {}
        allNames = {}
        for i in range(ttFont["maxp"].numGlyphs):
            glyphName = psName = self.glyphOrder[i]
            if glyphName == "":
                glyphName = "glyph%.5d" % i
            if glyphName in allNames:
                # make up a new glyphName that's unique
                n = allNames[glyphName]
                while (glyphName + "#" + str(n)) in allNames:
                    n += 1
                allNames[glyphName] = n + 1
                glyphName = glyphName + "#" + str(n)

            self.glyphOrder[i] = glyphName
            allNames[glyphName] = 1
            if glyphName != psName:
                mapping[glyphName] = psName

        self.mapping = mapping

    def decode_format_3_0(self, data, ttFont):
        # Setting self.glyphOrder to None will cause the TTFont object
        # try and construct glyph names from a Unicode cmap table.
        self.glyphOrder = None

    def decode_format_4_0(self, data, ttFont):
        """Format 4.0 (Apple): one uint16 per glyph, either a Unicode value
        (mapped through AGL) or 0xFFFF for unnamed glyphs."""
        from fontTools import agl

        numGlyphs = ttFont["maxp"].numGlyphs
        indices = array.array("H")
        indices.frombytes(data)
        if sys.byteorder != "big":
            indices.byteswap()
        # In some older fonts, the size of the post table doesn't match
        # the number of glyphs. Sometimes it's bigger, sometimes smaller.
        self.glyphOrder = glyphOrder = [""] * int(numGlyphs)
        for i in range(min(len(indices), numGlyphs)):
            if indices[i] == 0xFFFF:
                self.glyphOrder[i] = ""
            elif indices[i] in agl.UV2AGL:
                self.glyphOrder[i] = agl.UV2AGL[indices[i]]
            else:
                self.glyphOrder[i] = "uni%04X" % indices[i]
        self.build_psNameMapping(ttFont)

    def encode_format_2_0(self, ttFont):
        """Inverse of decode_format_2_0: rebuild the index array and the
        extra-names string table from the current glyph order."""
        numGlyphs = ttFont["maxp"].numGlyphs
        glyphOrder = ttFont.getGlyphOrder()
        assert len(glyphOrder) == numGlyphs
        indices = array.array("H")
        extraDict = {}
        # drop extra names that are (now) in the standard order
        extraNames = self.extraNames = [
            n for n in self.extraNames if n not in standardGlyphOrder
        ]
        for i in range(len(extraNames)):
            extraDict[extraNames[i]] = i
        for glyphID in range(numGlyphs):
            glyphName = glyphOrder[glyphID]
            if glyphName in self.mapping:
                # undo the uniquification performed by build_psNameMapping
                psName = self.mapping[glyphName]
            else:
                psName = glyphName
            if psName in extraDict:
                index = 258 + extraDict[psName]
            elif psName in standardGlyphOrder:
                index = standardGlyphOrder.index(psName)
            else:
                index = 258 + len(extraNames)
                extraDict[psName] = len(extraNames)
                extraNames.append(psName)
            indices.append(index)
        if sys.byteorder != "big":
            indices.byteswap()
        return (
            struct.pack(">H", numGlyphs) + indices.tobytes() + packPStrings(extraNames)
        )

    def encode_format_4_0(self, ttFont):
        """Inverse of decode_format_4_0: glyph names back to uint16 Unicode
        values via AGL or the uniXXXX convention; 0xFFFF when unmappable."""
        from fontTools import agl

        numGlyphs = ttFont["maxp"].numGlyphs
        glyphOrder = ttFont.getGlyphOrder()
        assert len(glyphOrder) == numGlyphs
        indices = array.array("H")
        for glyphID in glyphOrder:
            # strip any "#N" uniquification suffix before mapping
            glyphID = glyphID.split("#")[0]
            if glyphID in agl.AGL2UV:
                indices.append(agl.AGL2UV[glyphID])
            elif len(glyphID) == 7 and glyphID[:3] == "uni":
                indices.append(int(glyphID[3:], 16))
            else:
                indices.append(0xFFFF)
        if sys.byteorder != "big":
            indices.byteswap()
        return indices.tobytes()

    def toXML(self, writer, ttFont):
        """Write header fields, the psNames mapping, the extra names, and
        (if retained) the raw hex payload."""
        formatstring, names, fixes = sstruct.getformat(postFormat)
        for name in names:
            value = getattr(self, name)
            writer.simpletag(name, value=value)
            writer.newline()
        if hasattr(self, "mapping"):
            writer.begintag("psNames")
            writer.newline()
            writer.comment(
                "This file uses unique glyph names based on the information\n"
                "found in the 'post' table. Since these names might not be unique,\n"
                "we have to invent artificial names in case of clashes. In order to\n"
                "be able to retain the original information, we need a name to\n"
                "ps name mapping for those cases where they differ. That's what\n"
                "you see below.\n"
            )
            writer.newline()
            items = sorted(self.mapping.items())
            for name, psName in items:
                writer.simpletag("psName", name=name, psName=psName)
                writer.newline()
            writer.endtag("psNames")
            writer.newline()
        if hasattr(self, "extraNames"):
            writer.begintag("extraNames")
            writer.newline()
            writer.comment(
                "following are the name that are not taken from the standard Mac glyph order"
            )
            writer.newline()
            for name in self.extraNames:
                writer.simpletag("psName", name=name)
                writer.newline()
            writer.endtag("extraNames")
            writer.newline()
        if hasattr(self, "data"):
            writer.begintag("hexdata")
            writer.newline()
            writer.dumphex(self.data)
            writer.endtag("hexdata")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Inverse of toXML: header fields, psNames, extraNames, hexdata."""
        if name not in ("psNames", "extraNames", "hexdata"):
            setattr(self, name, safeEval(attrs["value"]))
        elif name == "psNames":
            self.mapping = {}
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "psName":
                    self.mapping[attrs["name"]] = attrs["psName"]
        elif name == "extraNames":
            self.extraNames = []
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "psName":
                    self.extraNames.append(attrs["name"])
        else:
            self.data = readHex(content)


def unpackPStrings(data, n):
    # extract n Pascal strings from data.
+ # if there is not enough data, use "" + + strings = [] + index = 0 + dataLen = len(data) + + for _ in range(n): + if dataLen <= index: + length = 0 + else: + length = byteord(data[index]) + index += 1 + + if dataLen <= index + length - 1: + name = "" + else: + name = tostr(data[index : index + length], encoding="latin1") + strings.append(name) + index += length + + if index < dataLen: + log.warning("%d extra bytes in post.stringData array", dataLen - index) + + elif dataLen < index: + log.warning("not enough data in post.stringData array") + + return strings + + +def packPStrings(strings): + data = b"" + for s in strings: + data = data + bytechr(len(s)) + tobytes(s, encoding="latin1") + return data diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_p_r_e_p.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_p_r_e_p.py new file mode 100644 index 0000000000000000000000000000000000000000..b4b92f3e924ba2f20ade9a6cca45ce78284ffe21 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_p_r_e_p.py @@ -0,0 +1,7 @@ +from fontTools import ttLib + +superclass = ttLib.getTableClass("fpgm") + + +class table__p_r_e_p(superclass): + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_p_r_o_p.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_p_r_o_p.py new file mode 100644 index 0000000000000000000000000000000000000000..aead9d72062e878d5e497f263a4f08eddbb048f6 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_p_r_o_p.py @@ -0,0 +1,6 @@ +from .otBase import BaseTTXConverter + + +# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6prop.html +class table__p_r_o_p(BaseTTXConverter): + pass diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_s_b_i_x.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/_s_b_i_x.py new file mode 100644 index 
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval, num2binary, binary2num
from . import DefaultTable
from .sbixStrike import Strike


# Fixed-size 'sbix' table header, unpacked/packed by sstruct.
sbixHeaderFormat = """
    >
    version:       H    # Version number (set to 1)
    flags:         H    # The only two bits used in the flags field are bits 0
                        # and 1. For historical reasons, bit 0 must always be 1.
                        # Bit 1 is a sbixDrawOutlines flag and is interpreted as
                        # follows:
                        #     0: Draw only 'sbix' bitmaps
                        #     1: Draw both 'sbix' bitmaps and outlines, in that
                        #        order
    numStrikes:    L    # Number of bitmap strikes to follow
"""
sbixHeaderFormatSize = sstruct.calcsize(sbixHeaderFormat)


# One per-strike offset record; numStrikes of these follow the header.
sbixStrikeOffsetFormat = """
    >
    strikeOffset:  L    # Offset from begining of table to data for the
                        # individual strike
"""
sbixStrikeOffsetFormatSize = sstruct.calcsize(sbixStrikeOffsetFormat)


class table__s_b_i_x(DefaultTable.DefaultTable):
    """Converter for the 'sbix' bitmap table.

    Strikes are kept in ``self.strikes``, a dict keyed by each strike's
    ppem value.
    """

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.version = 1
        self.flags = 1  # bit 0 must always be set, per the header comment above
        self.numStrikes = 0
        self.strikes = {}        # ppem -> Strike
        self.strikeOffsets = []  # transient; deleted at the end of decompile()

    def decompile(self, data, ttFont):
        """Parse raw table bytes into Strike objects keyed by ppem."""
        # read table header
        sstruct.unpack(sbixHeaderFormat, data[:sbixHeaderFormatSize], self)
        # collect offsets to individual strikes in self.strikeOffsets
        for i in range(self.numStrikes):
            current_offset = sbixHeaderFormatSize + i * sbixStrikeOffsetFormatSize
            offset_entry = sbixStrikeOffset()
            sstruct.unpack(
                sbixStrikeOffsetFormat,
                data[current_offset : current_offset + sbixStrikeOffsetFormatSize],
                offset_entry,
            )
            self.strikeOffsets.append(offset_entry.strikeOffset)

        # decompile Strikes
        # Iterate in reverse so each strike's raw data is the slice from its
        # offset to the (current) end of data; truncating data afterwards
        # makes the previous strike's slice end where this one began.
        for i in range(self.numStrikes - 1, -1, -1):
            current_strike = Strike(rawdata=data[self.strikeOffsets[i] :])
            data = data[: self.strikeOffsets[i]]
            current_strike.decompile(ttFont)
            # print "  Strike length: %xh" % len(bitmapSetData)
            # print "Number of Glyph entries:", len(current_strike.glyphs)
            if current_strike.ppem in self.strikes:
                from fontTools import ttLib

                raise ttLib.TTLibError("Pixel 'ppem' must be unique for each Strike")
            self.strikes[current_strike.ppem] = current_strike

        # after the glyph data records have been extracted, we don't need the offsets anymore
        del self.strikeOffsets
        del self.numStrikes

    def compile(self, ttFont):
        """Serialize header, strike-offset records, and strike data."""
        sbixData = b""
        self.numStrikes = len(self.strikes)
        sbixHeader = sstruct.pack(sbixHeaderFormat, self)

        # calculate offset to start of first strike
        setOffset = sbixHeaderFormatSize + sbixStrikeOffsetFormatSize * self.numStrikes

        # Strikes are written in ascending ppem order for a deterministic layout.
        for si in sorted(self.strikes.keys()):
            current_strike = self.strikes[si]
            current_strike.compile(ttFont)
            # append offset to this strike to table header
            current_strike.strikeOffset = setOffset
            sbixHeader += sstruct.pack(sbixStrikeOffsetFormat, current_strike)
            setOffset += len(current_strike.data)
            sbixData += current_strike.data

        return sbixHeader + sbixData

    def toXML(self, xmlWriter, ttFont):
        """Write version, flags (as a 16-bit binary string), and strikes."""
        xmlWriter.simpletag("version", value=self.version)
        xmlWriter.newline()
        xmlWriter.simpletag("flags", value=num2binary(self.flags, 16))
        xmlWriter.newline()
        for i in sorted(self.strikes.keys()):
            self.strikes[i].toXML(xmlWriter, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild the table from TTX elements; raises on unknown elements."""
        if name == "version":
            setattr(self, name, safeEval(attrs["value"]))
        elif name == "flags":
            # flags round-trips through the binary-string form used in toXML
            setattr(self, name, binary2num(attrs["value"]))
        elif name == "strike":
            current_strike = Strike()
            for element in content:
                if isinstance(element, tuple):
                    name, attrs, content = element
                    current_strike.fromXML(name, attrs, content, ttFont)
            self.strikes[current_strike.ppem] = current_strike
        else:
            from fontTools import ttLib

            raise ttLib.TTLibError("can't handle '%s' element" % name)


# Helper classes
class sbixStrikeOffset(object):
    # Plain record object: sstruct.unpack fills in a 'strikeOffset' attribute.
    pass


from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from fontTools.misc.fixedTools import (
    ensureVersionIsLong as fi2ve,
    versionToFixed as ve2fi,
)
from . import DefaultTable
import math


# Fixed-size 'vhea' table layout, unpacked/packed by sstruct.
vheaFormat = """
    >    # big endian
    tableVersion:           L
    ascent:                 h
    descent:                h
    lineGap:                h
    advanceHeightMax:       H
    minTopSideBearing:      h
    minBottomSideBearing:   h
    yMaxExtent:             h
    caretSlopeRise:         h
    caretSlopeRun:          h
    caretOffset:            h
    reserved1:              h
    reserved2:              h
    reserved3:              h
    reserved4:              h
    metricDataFormat:       h
    numberOfVMetrics:       H
"""


class table__v_h_e_a(DefaultTable.DefaultTable):
    """Converter for the 'vhea' (vertical header) table.

    Mirrors the structure of the horizontal header converter.
    """

    # Note: Keep in sync with table__h_h_e_a

    # Tables that must be compiled before this one so recalc() sees
    # up-to-date metrics and outlines.
    dependencies = ["vmtx", "glyf", "CFF ", "CFF2"]

    def decompile(self, data, ttFont):
        # Unpack the fixed-size binary header straight into attributes.
        sstruct.unpack(vheaFormat, data, self)

    def compile(self, ttFont):
        # Only recalculate aggregate metrics when bbox recalc is requested
        # and an outline table is actually loaded (and thus possibly edited).
        if ttFont.recalcBBoxes and (
            ttFont.isLoaded("glyf")
            or ttFont.isLoaded("CFF ")
            or ttFont.isLoaded("CFF2")
        ):
            self.recalc(ttFont)
        # Normalize the version to the long (Fixed) representation before packing.
        self.tableVersion = fi2ve(self.tableVersion)
        return sstruct.pack(vheaFormat, self)

    def recalc(self, ttFont):
        """Recompute aggregate vertical metrics from 'vmtx' plus the outlines.

        Updates advanceHeightMax, minTopSideBearing, minBottomSideBearing
        and yMaxExtent. Does nothing if 'vmtx' is absent.
        """
        if "vmtx" not in ttFont:
            return

        vmtxTable = ttFont["vmtx"]
        self.advanceHeightMax = max(adv for adv, _ in vmtxTable.metrics.values())

        # Collect per-glyph outline heights (yMax - yMin) from whichever
        # outline table is present; empty glyphs are skipped.
        boundsHeightDict = {}
        if "glyf" in ttFont:
            glyfTable = ttFont["glyf"]
            for name in ttFont.getGlyphOrder():
                g = glyfTable[name]
                if g.numberOfContours == 0:
                    continue
                if g.numberOfContours < 0 and not hasattr(g, "yMax"):
                    # Composite glyph without extents set.
                    # Calculate those.
                    g.recalcBounds(glyfTable)
                boundsHeightDict[name] = g.yMax - g.yMin
        elif "CFF " in ttFont or "CFF2" in ttFont:
            if "CFF " in ttFont:
                topDict = ttFont["CFF "].cff.topDictIndex[0]
            else:
                topDict = ttFont["CFF2"].cff.topDictIndex[0]
            charStrings = topDict.CharStrings
            for name in ttFont.getGlyphOrder():
                cs = charStrings[name]
                bounds = cs.calcBounds(charStrings)
                if bounds is not None:
                    # Round outward so the integer height never under-reports.
                    boundsHeightDict[name] = int(
                        math.ceil(bounds[3]) - math.floor(bounds[1])
                    )

        if boundsHeightDict:
            minTopSideBearing = float("inf")
            minBottomSideBearing = float("inf")
            yMaxExtent = -float("inf")
            for name, boundsHeight in boundsHeightDict.items():
                advanceHeight, tsb = vmtxTable[name]
                # bottom side bearing = advance minus top bearing and outline height
                bsb = advanceHeight - tsb - boundsHeight
                extent = tsb + boundsHeight
                minTopSideBearing = min(minTopSideBearing, tsb)
                minBottomSideBearing = min(minBottomSideBearing, bsb)
                yMaxExtent = max(yMaxExtent, extent)
            self.minTopSideBearing = minTopSideBearing
            self.minBottomSideBearing = minBottomSideBearing
            self.yMaxExtent = yMaxExtent

        else:  # No glyph has outlines.
            self.minTopSideBearing = 0
            self.minBottomSideBearing = 0
            self.yMaxExtent = 0

    def toXML(self, writer, ttFont):
        """Write one simpletag per header field; version is shown as hex Fixed."""
        formatstring, names, fixes = sstruct.getformat(vheaFormat)
        for name in names:
            value = getattr(self, name)
            if name == "tableVersion":
                value = fi2ve(value)
                value = "0x%08x" % value
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "tableVersion":
            # Parse the hex Fixed form written by toXML.
            setattr(self, name, ve2fi(attrs["value"]))
            return
        setattr(self, name, safeEval(attrs["value"]))

    # reserved0 is caretOffset for legacy reasons
    @property
    def reserved0(self):
        return self.caretOffset

    @reserved0.setter
    def reserved0(self, value):
        self.caretOffset = value
+ +ttLib provides functions to expand a tag into the format used here: + +>>> from fontTools import ttLib +>>> ttLib.tagToIdentifier("FOO ") +'F_O_O_' +>>> ttLib.tagToIdentifier("cvt ") +'_c_v_t' +>>> ttLib.tagToIdentifier("OS/2") +'O_S_2f_2' +>>> ttLib.tagToIdentifier("glyf") +'_g_l_y_f' +>>> + +And vice versa: + +>>> ttLib.identifierToTag("F_O_O_") +'FOO ' +>>> ttLib.identifierToTag("_c_v_t") +'cvt ' +>>> ttLib.identifierToTag("O_S_2f_2") +'OS/2' +>>> ttLib.identifierToTag("_g_l_y_f") +'glyf' +>>> + +Eg. the 'glyf' table converter lives in a Python file called: + + _g_l_y_f.py + +The converter itself is a class, named "table_" + expandedtag. Eg: + + class table__g_l_y_f: + etc. + +Note that if you _do_ need to use such modules or classes manually, +there are two convenient API functions that let you find them by tag: + +>>> ttLib.getTableModule('glyf') +<module 'ttLib.tables._g_l_y_f'> +>>> ttLib.getTableClass('glyf') +<class ttLib.tables._g_l_y_f.table__g_l_y_f at 645f400> +>>> + +You must subclass from DefaultTable.DefaultTable. It provides some default +behavior, as well as a constructor method (__init__) that you don't need to +override. + +Your converter should minimally provide two methods: + +class table_F_O_O_(DefaultTable.DefaultTable): # converter for table 'FOO ' + + def decompile(self, data, ttFont): + # 'data' is the raw table data. Unpack it into a + # Python data structure. + # 'ttFont' is a ttLib.TTfile instance, enabling you to + # refer to other tables. Do ***not*** keep a reference to + # it: it will cause a circular reference (ttFont saves + # a reference to us), and that means we'll be leaking + # memory. If you need to use it in other methods, just + # pass it around as a method argument. + + def compile(self, ttFont): + # Return the raw data, as converted from the Python + # data structure. + # Again, 'ttFont' is there so you can access other tables. + # Same warning applies. 
+ +If you want to support TTX import/export as well, you need to provide two +additional methods: + + def toXML(self, writer, ttFont): + # XXX + + def fromXML(self, (name, attrs, content), ttFont): + # XXX + diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/ttProgram.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/ttProgram.py new file mode 100644 index 0000000000000000000000000000000000000000..32a4ec8b20ff8b4c20be33efebea7273af98931b --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/ttProgram.py @@ -0,0 +1,594 @@ +"""ttLib.tables.ttProgram.py -- Assembler/disassembler for TrueType bytecode programs.""" + +from __future__ import annotations + +from fontTools.misc.textTools import num2binary, binary2num, readHex, strjoin +import array +from io import StringIO +from typing import List +import re +import logging + + +log = logging.getLogger(__name__) + +# fmt: off + +# first, the list of instructions that eat bytes or words from the instruction stream + +streamInstructions = [ +# +# opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes +# + (0x40, 'NPUSHB', 0, 'PushNBytes', 0, -1), # n, b1, b2,...bn b1,b2...bn + (0x41, 'NPUSHW', 0, 'PushNWords', 0, -1), # n, w1, w2,...w w1,w2...wn + (0xb0, 'PUSHB', 3, 'PushBytes', 0, -1), # b0, b1,..bn b0, b1, ...,bn + (0xb8, 'PUSHW', 3, 'PushWords', 0, -1), # w0,w1,..wn w0 ,w1, ...wn +] + + +# next, the list of "normal" instructions + +instructions = [ +# +# opcode mnemonic argBits descriptive name pops pushes eats from instruction stream pushes +# + (0x7f, 'AA', 0, 'AdjustAngle', 1, 0), # p - + (0x64, 'ABS', 0, 'Absolute', 1, 1), # n |n| + (0x60, 'ADD', 0, 'Add', 2, 1), # n2, n1 (n1 + n2) + (0x27, 'ALIGNPTS', 0, 'AlignPts', 2, 0), # p2, p1 - + (0x3c, 'ALIGNRP', 0, 'AlignRelativePt', -1, 0), # p1, p2, ... 
, ploopvalue - + (0x5a, 'AND', 0, 'LogicalAnd', 2, 1), # e2, e1 b + (0x2b, 'CALL', 0, 'CallFunction', 1, 0), # f - + (0x67, 'CEILING', 0, 'Ceiling', 1, 1), # n ceil(n) + (0x25, 'CINDEX', 0, 'CopyXToTopStack', 1, 1), # k ek + (0x22, 'CLEAR', 0, 'ClearStack', -1, 0), # all items on the stack - + (0x4f, 'DEBUG', 0, 'DebugCall', 1, 0), # n - + (0x73, 'DELTAC1', 0, 'DeltaExceptionC1', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - + (0x74, 'DELTAC2', 0, 'DeltaExceptionC2', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - + (0x75, 'DELTAC3', 0, 'DeltaExceptionC3', -1, 0), # argn, cn, argn-1,cn-1, , arg1, c1 - + (0x5d, 'DELTAP1', 0, 'DeltaExceptionP1', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - + (0x71, 'DELTAP2', 0, 'DeltaExceptionP2', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - + (0x72, 'DELTAP3', 0, 'DeltaExceptionP3', -1, 0), # argn, pn, argn-1, pn-1, , arg1, p1 - + (0x24, 'DEPTH', 0, 'GetDepthStack', 0, 1), # - n + (0x62, 'DIV', 0, 'Divide', 2, 1), # n2, n1 (n1 * 64)/ n2 + (0x20, 'DUP', 0, 'DuplicateTopStack', 1, 2), # e e, e + (0x59, 'EIF', 0, 'EndIf', 0, 0), # - - + (0x1b, 'ELSE', 0, 'Else', 0, 0), # - - + (0x2d, 'ENDF', 0, 'EndFunctionDefinition', 0, 0), # - - + (0x54, 'EQ', 0, 'Equal', 2, 1), # e2, e1 b + (0x57, 'EVEN', 0, 'Even', 1, 1), # e b + (0x2c, 'FDEF', 0, 'FunctionDefinition', 1, 0), # f - + (0x4e, 'FLIPOFF', 0, 'SetAutoFlipOff', 0, 0), # - - + (0x4d, 'FLIPON', 0, 'SetAutoFlipOn', 0, 0), # - - + (0x80, 'FLIPPT', 0, 'FlipPoint', -1, 0), # p1, p2, ..., ploopvalue - + (0x82, 'FLIPRGOFF', 0, 'FlipRangeOff', 2, 0), # h, l - + (0x81, 'FLIPRGON', 0, 'FlipRangeOn', 2, 0), # h, l - + (0x66, 'FLOOR', 0, 'Floor', 1, 1), # n floor(n) + (0x46, 'GC', 1, 'GetCoordOnPVector', 1, 1), # p c + (0x88, 'GETINFO', 0, 'GetInfo', 1, 1), # selector result + (0x91, 'GETVARIATION', 0, 'GetVariation', 0, -1), # - a1,..,an + (0x0d, 'GFV', 0, 'GetFVector', 0, 2), # - px, py + (0x0c, 'GPV', 0, 'GetPVector', 0, 2), # - px, py + (0x52, 'GT', 0, 'GreaterThan', 2, 1), # e2, e1 b + 
(0x53, 'GTEQ', 0, 'GreaterThanOrEqual', 2, 1), # e2, e1 b + (0x89, 'IDEF', 0, 'InstructionDefinition', 1, 0), # f - + (0x58, 'IF', 0, 'If', 1, 0), # e - + (0x8e, 'INSTCTRL', 0, 'SetInstrExecControl', 2, 0), # s, v - + (0x39, 'IP', 0, 'InterpolatePts', -1, 0), # p1, p2, ... , ploopvalue - + (0x0f, 'ISECT', 0, 'MovePtToIntersect', 5, 0), # a1, a0, b1, b0, p - + (0x30, 'IUP', 1, 'InterpolateUntPts', 0, 0), # - - + (0x1c, 'JMPR', 0, 'Jump', 1, 0), # offset - + (0x79, 'JROF', 0, 'JumpRelativeOnFalse', 2, 0), # e, offset - + (0x78, 'JROT', 0, 'JumpRelativeOnTrue', 2, 0), # e, offset - + (0x2a, 'LOOPCALL', 0, 'LoopAndCallFunction', 2, 0), # f, count - + (0x50, 'LT', 0, 'LessThan', 2, 1), # e2, e1 b + (0x51, 'LTEQ', 0, 'LessThenOrEqual', 2, 1), # e2, e1 b + (0x8b, 'MAX', 0, 'Maximum', 2, 1), # e2, e1 max(e1, e2) + (0x49, 'MD', 1, 'MeasureDistance', 2, 1), # p2,p1 d + (0x2e, 'MDAP', 1, 'MoveDirectAbsPt', 1, 0), # p - + (0xc0, 'MDRP', 5, 'MoveDirectRelPt', 1, 0), # p - + (0x3e, 'MIAP', 1, 'MoveIndirectAbsPt', 2, 0), # n, p - + (0x8c, 'MIN', 0, 'Minimum', 2, 1), # e2, e1 min(e1, e2) + (0x26, 'MINDEX', 0, 'MoveXToTopStack', 1, 1), # k ek + (0xe0, 'MIRP', 5, 'MoveIndirectRelPt', 2, 0), # n, p - + (0x4b, 'MPPEM', 0, 'MeasurePixelPerEm', 0, 1), # - ppem + (0x4c, 'MPS', 0, 'MeasurePointSize', 0, 1), # - pointSize + (0x3a, 'MSIRP', 1, 'MoveStackIndirRelPt', 2, 0), # d, p - + (0x63, 'MUL', 0, 'Multiply', 2, 1), # n2, n1 (n1 * n2)/64 + (0x65, 'NEG', 0, 'Negate', 1, 1), # n -n + (0x55, 'NEQ', 0, 'NotEqual', 2, 1), # e2, e1 b + (0x5c, 'NOT', 0, 'LogicalNot', 1, 1), # e ( not e ) + (0x6c, 'NROUND', 2, 'NoRound', 1, 1), # n1 n2 + (0x56, 'ODD', 0, 'Odd', 1, 1), # e b + (0x5b, 'OR', 0, 'LogicalOr', 2, 1), # e2, e1 b + (0x21, 'POP', 0, 'PopTopStack', 1, 0), # e - + (0x45, 'RCVT', 0, 'ReadCVT', 1, 1), # location value + (0x7d, 'RDTG', 0, 'RoundDownToGrid', 0, 0), # - - + (0x7a, 'ROFF', 0, 'RoundOff', 0, 0), # - - + (0x8a, 'ROLL', 0, 'RollTopThreeStack', 3, 3), # a,b,c b,a,c + (0x68, 'ROUND', 
2, 'Round', 1, 1), # n1 n2 + (0x43, 'RS', 0, 'ReadStore', 1, 1), # n v + (0x3d, 'RTDG', 0, 'RoundToDoubleGrid', 0, 0), # - - + (0x18, 'RTG', 0, 'RoundToGrid', 0, 0), # - - + (0x19, 'RTHG', 0, 'RoundToHalfGrid', 0, 0), # - - + (0x7c, 'RUTG', 0, 'RoundUpToGrid', 0, 0), # - - + (0x77, 'S45ROUND', 0, 'SuperRound45Degrees', 1, 0), # n - + (0x7e, 'SANGW', 0, 'SetAngleWeight', 1, 0), # weight - + (0x85, 'SCANCTRL', 0, 'ScanConversionControl', 1, 0), # n - + (0x8d, 'SCANTYPE', 0, 'ScanType', 1, 0), # n - + (0x48, 'SCFS', 0, 'SetCoordFromStackFP', 2, 0), # c, p - + (0x1d, 'SCVTCI', 0, 'SetCVTCutIn', 1, 0), # n - + (0x5e, 'SDB', 0, 'SetDeltaBaseInGState', 1, 0), # n - + (0x86, 'SDPVTL', 1, 'SetDualPVectorToLine', 2, 0), # p2, p1 - + (0x5f, 'SDS', 0, 'SetDeltaShiftInGState', 1, 0), # n - + (0x0b, 'SFVFS', 0, 'SetFVectorFromStack', 2, 0), # y, x - + (0x04, 'SFVTCA', 1, 'SetFVectorToAxis', 0, 0), # - - + (0x08, 'SFVTL', 1, 'SetFVectorToLine', 2, 0), # p2, p1 - + (0x0e, 'SFVTPV', 0, 'SetFVectorToPVector', 0, 0), # - - + (0x34, 'SHC', 1, 'ShiftContourByLastPt', 1, 0), # c - + (0x32, 'SHP', 1, 'ShiftPointByLastPoint', -1, 0), # p1, p2, ..., ploopvalue - + (0x38, 'SHPIX', 0, 'ShiftZoneByPixel', -1, 0), # d, p1, p2, ..., ploopvalue - + (0x36, 'SHZ', 1, 'ShiftZoneByLastPoint', 1, 0), # e - + (0x17, 'SLOOP', 0, 'SetLoopVariable', 1, 0), # n - + (0x1a, 'SMD', 0, 'SetMinimumDistance', 1, 0), # distance - + (0x0a, 'SPVFS', 0, 'SetPVectorFromStack', 2, 0), # y, x - + (0x02, 'SPVTCA', 1, 'SetPVectorToAxis', 0, 0), # - - + (0x06, 'SPVTL', 1, 'SetPVectorToLine', 2, 0), # p2, p1 - + (0x76, 'SROUND', 0, 'SuperRound', 1, 0), # n - + (0x10, 'SRP0', 0, 'SetRefPoint0', 1, 0), # p - + (0x11, 'SRP1', 0, 'SetRefPoint1', 1, 0), # p - + (0x12, 'SRP2', 0, 'SetRefPoint2', 1, 0), # p - + (0x1f, 'SSW', 0, 'SetSingleWidth', 1, 0), # n - + (0x1e, 'SSWCI', 0, 'SetSingleWidthCutIn', 1, 0), # n - + (0x61, 'SUB', 0, 'Subtract', 2, 1), # n2, n1 (n1 - n2) + (0x00, 'SVTCA', 1, 'SetFPVectorToAxis', 0, 0), # - - + 
def bitRepr(value, bits):
    """Return *value* as a binary string exactly *bits* digits wide.

    Only the lowest *bits* bits are represented; higher bits of *value*
    are ignored. bits == 0 yields the empty string.
    """
    digits = []
    for _ in range(bits):
        digits.append("01"[value & 0x1])
        value >>= 1
    # Digits were collected least-significant first; reverse for display order.
    return "".join(reversed(digits))
+ def fromBytecode(self, bytecode: bytes) -> None: + self.bytecode = array.array("B", bytecode) + if hasattr(self, "assembly"): + del self.assembly + + def fromAssembly(self, assembly: List[str] | str) -> None: + if isinstance(assembly, list): + self.assembly = assembly + elif isinstance(assembly, str): + self.assembly = assembly.splitlines() + else: + raise TypeError(f"expected str or List[str], got {type(assembly).__name__}") + if hasattr(self, "bytecode"): + del self.bytecode + + def getBytecode(self) -> bytes: + if not hasattr(self, "bytecode"): + self._assemble() + return self.bytecode.tobytes() + + def getAssembly(self, preserve=True) -> List[str]: + if not hasattr(self, "assembly"): + self._disassemble(preserve=preserve) + return self.assembly + + def toXML(self, writer, ttFont) -> None: + if ( + not hasattr(ttFont, "disassembleInstructions") + or ttFont.disassembleInstructions + ): + try: + assembly = self.getAssembly() + except: + import traceback + + tmp = StringIO() + traceback.print_exc(file=tmp) + msg = "An exception occurred during the decompilation of glyph program:\n\n" + msg += tmp.getvalue() + log.error(msg) + writer.begintag("bytecode") + writer.newline() + writer.comment(msg.strip()) + writer.newline() + writer.dumphex(self.getBytecode()) + writer.endtag("bytecode") + writer.newline() + else: + if not assembly: + return + writer.begintag("assembly") + writer.newline() + i = 0 + indent = 0 + nInstr = len(assembly) + while i < nInstr: + instr = assembly[i] + if _unindentRE.match(instr): + indent -= 1 + writer.write(writer.indentwhite * indent) + writer.write(instr) + writer.newline() + m = _pushCountPat.match(instr) + i = i + 1 + if m: + nValues = int(m.group(1)) + line: List[str] = [] + j = 0 + for j in range(nValues): + if j and not (j % 25): + writer.write(writer.indentwhite * indent) + writer.write(" ".join(line)) + writer.newline() + line = [] + line.append(assembly[i + j]) + writer.write(writer.indentwhite * indent) + writer.write(" 
".join(line)) + writer.newline() + i = i + j + 1 + if _indentRE.match(instr): + indent += 1 + writer.endtag("assembly") + writer.newline() + else: + bytecode = self.getBytecode() + if not bytecode: + return + writer.begintag("bytecode") + writer.newline() + writer.dumphex(bytecode) + writer.endtag("bytecode") + writer.newline() + + def fromXML(self, name, attrs, content, ttFont) -> None: + if name == "assembly": + self.fromAssembly(strjoin(content)) + self._assemble() + del self.assembly + else: + assert name == "bytecode" + self.fromBytecode(readHex(content)) + + def _assemble(self) -> None: + assembly = " ".join(getattr(self, "assembly", [])) + bytecode: List[int] = [] + push = bytecode.append + lenAssembly = len(assembly) + pos = _skipWhite(assembly, 0) + while pos < lenAssembly: + m = _tokenRE.match(assembly, pos) + if m is None: + raise tt_instructions_error( + "Syntax error in TT program (%s)" % assembly[pos - 5 : pos + 15] + ) + dummy, mnemonic, arg, number, comment = m.groups() + pos = m.regs[0][1] + if comment: + pos = _skipWhite(assembly, pos) + continue + + arg = arg.strip() + if mnemonic.startswith("INSTR"): + # Unknown instruction + op = int(mnemonic[5:]) + push(op) + elif mnemonic not in ("PUSH", "NPUSHB", "NPUSHW", "PUSHB", "PUSHW"): + op, argBits, name = mnemonicDict[mnemonic] + if len(arg) != argBits: + raise tt_instructions_error( + "Incorrect number of argument bits (%s[%s])" % (mnemonic, arg) + ) + if arg: + arg = binary2num(arg) + push(op + arg) + else: + push(op) + else: + args = [] + pos = _skipWhite(assembly, pos) + while pos < lenAssembly: + m = _tokenRE.match(assembly, pos) + if m is None: + raise tt_instructions_error( + "Syntax error in TT program (%s)" % assembly[pos : pos + 15] + ) + dummy, _mnemonic, arg, number, comment = m.groups() + if number is None and comment is None: + break + pos = m.regs[0][1] + pos = _skipWhite(assembly, pos) + if comment is not None: + continue + args.append(int(number)) + nArgs = len(args) + if mnemonic == 
"PUSH": + # Automatically choose the most compact representation + nWords = 0 + while nArgs: + while ( + nWords < nArgs + and nWords < 255 + and not (0 <= args[nWords] <= 255) + ): + nWords += 1 + nBytes = 0 + while ( + nWords + nBytes < nArgs + and nBytes < 255 + and 0 <= args[nWords + nBytes] <= 255 + ): + nBytes += 1 + if ( + nBytes < 2 + and nWords + nBytes < 255 + and nWords + nBytes != nArgs + ): + # Will write bytes as words + nWords += nBytes + continue + + # Write words + if nWords: + if nWords <= 8: + op, argBits, name = streamMnemonicDict["PUSHW"] + op = op + nWords - 1 + push(op) + else: + op, argBits, name = streamMnemonicDict["NPUSHW"] + push(op) + push(nWords) + for value in args[:nWords]: + assert -32768 <= value < 32768, ( + "PUSH value out of range %d" % value + ) + push((value >> 8) & 0xFF) + push(value & 0xFF) + + # Write bytes + if nBytes: + pass + if nBytes <= 8: + op, argBits, name = streamMnemonicDict["PUSHB"] + op = op + nBytes - 1 + push(op) + else: + op, argBits, name = streamMnemonicDict["NPUSHB"] + push(op) + push(nBytes) + for value in args[nWords : nWords + nBytes]: + push(value) + + nTotal = nWords + nBytes + args = args[nTotal:] + nArgs -= nTotal + nWords = 0 + else: + # Write exactly what we've been asked to + words = mnemonic[-1] == "W" + op, argBits, name = streamMnemonicDict[mnemonic] + if mnemonic[0] != "N": + assert nArgs <= 8, nArgs + op = op + nArgs - 1 + push(op) + else: + assert nArgs < 256 + push(op) + push(nArgs) + if words: + for value in args: + assert -32768 <= value < 32768, ( + "PUSHW value out of range %d" % value + ) + push((value >> 8) & 0xFF) + push(value & 0xFF) + else: + for value in args: + assert 0 <= value < 256, ( + "PUSHB value out of range %d" % value + ) + push(value) + + pos = _skipWhite(assembly, pos) + + if bytecode: + assert max(bytecode) < 256 and min(bytecode) >= 0 + self.bytecode = array.array("B", bytecode) + + def _disassemble(self, preserve=False) -> None: + assembly = [] + i = 0 + bytecode = 
getattr(self, "bytecode", []) + numBytecode = len(bytecode) + while i < numBytecode: + op = bytecode[i] + try: + mnemonic, argBits, argoffset, name = opcodeDict[op] + except KeyError: + if op in streamOpcodeDict: + values = [] + + # Merge consecutive PUSH operations + while bytecode[i] in streamOpcodeDict: + op = bytecode[i] + mnemonic, argBits, argoffset, name = streamOpcodeDict[op] + words = mnemonic[-1] == "W" + if argBits: + nValues = op - argoffset + 1 + else: + i = i + 1 + nValues = bytecode[i] + i = i + 1 + assert nValues > 0 + if not words: + for j in range(nValues): + value = bytecode[i] + values.append(repr(value)) + i = i + 1 + else: + for j in range(nValues): + # cast to signed int16 + value = (bytecode[i] << 8) | bytecode[i + 1] + if value >= 0x8000: + value = value - 0x10000 + values.append(repr(value)) + i = i + 2 + if preserve: + break + + if not preserve: + mnemonic = "PUSH" + nValues = len(values) + if nValues == 1: + assembly.append("%s[ ] /* 1 value pushed */" % mnemonic) + else: + assembly.append( + "%s[ ] /* %s values pushed */" % (mnemonic, nValues) + ) + assembly.extend(values) + else: + assembly.append("INSTR%d[ ]" % op) + i = i + 1 + else: + if argBits: + assembly.append( + mnemonic + + "[%s] /* %s */" % (num2binary(op - argoffset, argBits), name) + ) + else: + assembly.append(mnemonic + "[ ] /* %s */" % name) + i = i + 1 + self.assembly = assembly + + def __bool__(self) -> bool: + """ + >>> p = Program() + >>> bool(p) + False + >>> bc = array.array("B", [0]) + >>> p.fromBytecode(bc) + >>> bool(p) + True + >>> p.bytecode.pop() + 0 + >>> bool(p) + False + + >>> p = Program() + >>> asm = ['SVTCA[0]'] + >>> p.fromAssembly(asm) + >>> bool(p) + True + >>> p.assembly.pop() + 'SVTCA[0]' + >>> bool(p) + False + """ + return (hasattr(self, "assembly") and len(self.assembly) > 0) or ( + hasattr(self, "bytecode") and len(self.bytecode) > 0 + ) + + __nonzero__ = __bool__ + + def __eq__(self, other) -> bool: + if type(self) != type(other): + return 
NotImplemented + return self.__dict__ == other.__dict__ + + def __ne__(self, other) -> bool: + result = self.__eq__(other) + return result if result is NotImplemented else not result + + +def _test(): + """ + >>> _test() + True + """ + + bc = b"""@;:9876543210/.-,+*)(\'&%$#"! \037\036\035\034\033\032\031\030\027\026\025\024\023\022\021\020\017\016\015\014\013\012\011\010\007\006\005\004\003\002\001\000,\001\260\030CXEj\260\031C`\260F#D#\020 \260FN\360M/\260\000\022\033!#\0213Y-,\001\260\030CX\260\005+\260\000\023K\260\024PX\261\000@8Y\260\006+\033!#\0213Y-,\001\260\030CXN\260\003%\020\362!\260\000\022M\033 E\260\004%\260\004%#Jad\260(RX!#\020\326\033\260\003%\020\362!\260\000\022YY-,\260\032CX!!\033\260\002%\260\002%I\260\003%\260\003%Ja d\260\020PX!!!\033\260\003%\260\003%I\260\000PX\260\000PX\270\377\3428!\033\260\0208!Y\033\260\000RX\260\0368!\033\270\377\3608!YYYY-,\001\260\030CX\260\005+\260\000\023K\260\024PX\271\000\000\377\3008Y\260\006+\033!#\0213Y-,N\001\212\020\261F\031CD\260\000\024\261\000F\342\260\000\025\271\000\000\377\3608\000\260\000<\260(+\260\002%\020\260\000<-,\001\030\260\000/\260\001\024\362\260\001\023\260\001\025M\260\000\022-,\001\260\030CX\260\005+\260\000\023\271\000\000\377\3408\260\006+\033!#\0213Y-,\001\260\030CXEdj#Edi\260\031Cd``\260F#D#\020 \260F\360/\260\000\022\033!! 
\212 \212RX\0213\033!!YY-,\001\261\013\012C#Ce\012-,\000\261\012\013C#C\013-,\000\260F#p\261\001F>\001\260F#p\261\002FE:\261\002\000\010\015-,\260\022+\260\002%E\260\002%Ej\260@\213`\260\002%#D!!!-,\260\023+\260\002%E\260\002%Ej\270\377\300\214`\260\002%#D!!!-,\260\000\260\022+!!!-,\260\000\260\023+!!!-,\001\260\006C\260\007Ce\012-, i\260@a\260\000\213 \261,\300\212\214\270\020\000b`+\014d#da\\X\260\003aY-,\261\000\003%EhT\260\034KPZX\260\003%E\260\003%E`h \260\004%#D\260\004%#D\033\260\003% Eh \212#D\260\003%Eh`\260\003%#DY-,\260\003% Eh \212#D\260\003%Edhe`\260\004%\260\001`#D-,\260\011CX\207!\300\033\260\022CX\207E\260\021+\260G#D\260Gz\344\033\003\212E\030i \260G#D\212\212\207 \260\240QX\260\021+\260G#D\260Gz\344\033!\260Gz\344YYY\030-, \212E#Eh`D-,EjB-,\001\030/-,\001\260\030CX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260\031C`\260F#D!\212\020\260F\366!\033!!!!Y-,\001\260\030CX\260\002%E\260\002%Ed`j\260\003%Eja \260\004%Ej \212\213e\260\004%#D\214\260\003%#D!!\033 EjD EjDY-,\001 E\260\000U\260\030CZXEh#Ei\260@\213a \260\200bj \212#a \260\003%\213e\260\004%#D\214\260\003%#D!!\033!!\260\031+Y-,\001\212\212Ed#EdadB-,\260\004%\260\004%\260\031+\260\030CX\260\004%\260\004%\260\003%\260\033+\001\260\002%C\260@T\260\002%C\260\000TZX\260\003% E\260@aDY\260\002%C\260\000T\260\002%C\260@TZX\260\004% E\260@`DYY!!!!-,\001KRXC\260\002%E#aD\033!!Y-,\001KRXC\260\002%E#`D\033!!Y-,KRXED\033!!Y-,\001 \260\003%#I\260@`\260 c \260\000RX#\260\002%8#\260\002%e8\000\212c8\033!!!!!Y\001-,KPXED\033!!Y-,\001\260\005%\020# \212\365\000\260\001`#\355\354-,\001\260\005%\020# \212\365\000\260\001a#\355\354-,\001\260\006%\020\365\000\355\354-,F#F`\212\212F# F\212`\212a\270\377\200b# \020#\212\261KK\212pE` \260\000PX\260\001a\270\377\272\213\033\260F\214Y\260\020`h\001:-, E\260\003%FRX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-, E\260\003%FPX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-,\000\260\007C\260\006C\013-,\212\020\354-,\260\014CX!\033 
F\260\000RX\270\377\3608\033\260\0208YY-, \260\000UX\270\020\000c\260\003%Ed\260\003%Eda\260\000SX\260\002\033\260@a\260\003Y%EiSXED\033!!Y\033!\260\002%E\260\002%Ead\260(QXED\033!!YY-,!!\014d#d\213\270@\000b-,!\260\200QX\014d#d\213\270 \000b\033\262\000@/+Y\260\002`-,!\260\300QX\014d#d\213\270\025Ub\033\262\000\200/+Y\260\002`-,\014d#d\213\270@\000b`#!-,KSX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260F#D!\212\020\260F\366!\033!\212\021#\022 9/Y-,\260\002%\260\002%Id\260\300TX\270\377\3708\260\0108\033!!Y-,\260\023CX\003\033\002Y-,\260\023CX\002\033\003Y-,\260\012+#\020 <\260\027+-,\260\002%\270\377\3608\260(+\212\020# \320#\260\020+\260\005CX\300\033<Y \020\021\260\000\022\001-,KS#KQZX8\033!!Y-,\001\260\002%\020\320#\311\001\260\001\023\260\000\024\020\260\001<\260\001\026-,\001\260\000\023\260\001\260\003%I\260\003\0278\260\001\023-,KS#KQZX E\212`D\033!!Y-, 9/-""" + + p = Program() + p.fromBytecode(bc) + asm = p.getAssembly(preserve=True) + p.fromAssembly(asm) + print(bc == p.getBytecode()) + + +if __name__ == "__main__": + import sys + import doctest + + sys.exit(doctest.testmod().failed) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/woff2.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/woff2.py new file mode 100644 index 0000000000000000000000000000000000000000..03667e834b7d82e432f792fd5200930b39792e33 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/woff2.py @@ -0,0 +1,1683 @@ +from io import BytesIO +import sys +import array +import struct +from collections import OrderedDict +from fontTools.misc import sstruct +from fontTools.misc.arrayTools import calcIntBounds +from fontTools.misc.textTools import Tag, bytechr, byteord, bytesjoin, pad +from fontTools.ttLib import ( + TTFont, + TTLibError, + getTableModule, + getTableClass, + getSearchRange, +) +from fontTools.ttLib.sfnt import ( + SFNTReader, + SFNTWriter, + DirectoryEntry, + WOFFFlavorData, + 
sfntDirectoryFormat, + sfntDirectorySize, + SFNTDirectoryEntry, + sfntDirectoryEntrySize, + calcChecksum, +) +from fontTools.ttLib.tables import ttProgram, _g_l_y_f +import logging + + +log = logging.getLogger("fontTools.ttLib.woff2") + +haveBrotli = False +try: + try: + import brotlicffi as brotli + except ImportError: + import brotli + haveBrotli = True +except ImportError: + pass + + +class WOFF2Reader(SFNTReader): + flavor = "woff2" + + def __init__(self, file, checkChecksums=0, fontNumber=-1): + if not haveBrotli: + log.error( + "The WOFF2 decoder requires the Brotli Python extension, available at: " + "https://github.com/google/brotli" + ) + raise ImportError("No module named brotli") + + self.file = file + + signature = Tag(self.file.read(4)) + if signature != b"wOF2": + raise TTLibError("Not a WOFF2 font (bad signature)") + + self.file.seek(0) + self.DirectoryEntry = WOFF2DirectoryEntry + data = self.file.read(woff2DirectorySize) + if len(data) != woff2DirectorySize: + raise TTLibError("Not a WOFF2 font (not enough data)") + sstruct.unpack(woff2DirectoryFormat, data, self) + + self.tables = OrderedDict() + offset = 0 + for i in range(self.numTables): + entry = self.DirectoryEntry() + entry.fromFile(self.file) + tag = Tag(entry.tag) + self.tables[tag] = entry + entry.offset = offset + offset += entry.length + + totalUncompressedSize = offset + compressedData = self.file.read(self.totalCompressedSize) + decompressedData = brotli.decompress(compressedData) + if len(decompressedData) != totalUncompressedSize: + raise TTLibError( + "unexpected size for decompressed font data: expected %d, found %d" + % (totalUncompressedSize, len(decompressedData)) + ) + self.transformBuffer = BytesIO(decompressedData) + + self.file.seek(0, 2) + if self.length != self.file.tell(): + raise TTLibError("reported 'length' doesn't match the actual file size") + + self.flavorData = WOFF2FlavorData(self) + + # make empty TTFont to store data while reconstructing tables + self.ttFont = 
TTFont(recalcBBoxes=False, recalcTimestamp=False) + + def __getitem__(self, tag): + """Fetch the raw table data. Reconstruct transformed tables.""" + entry = self.tables[Tag(tag)] + if not hasattr(entry, "data"): + if entry.transformed: + entry.data = self.reconstructTable(tag) + else: + entry.data = entry.loadData(self.transformBuffer) + return entry.data + + def reconstructTable(self, tag): + """Reconstruct table named 'tag' from transformed data.""" + entry = self.tables[Tag(tag)] + rawData = entry.loadData(self.transformBuffer) + if tag == "glyf": + # no need to pad glyph data when reconstructing + padding = self.padding if hasattr(self, "padding") else None + data = self._reconstructGlyf(rawData, padding) + elif tag == "loca": + data = self._reconstructLoca() + elif tag == "hmtx": + data = self._reconstructHmtx(rawData) + else: + raise TTLibError("transform for table '%s' is unknown" % tag) + return data + + def _reconstructGlyf(self, data, padding=None): + """Return recostructed glyf table data, and set the corresponding loca's + locations. Optionally pad glyph offsets to the specified number of bytes. 
+ """ + self.ttFont["loca"] = WOFF2LocaTable() + glyfTable = self.ttFont["glyf"] = WOFF2GlyfTable() + glyfTable.reconstruct(data, self.ttFont) + if padding: + glyfTable.padding = padding + data = glyfTable.compile(self.ttFont) + return data + + def _reconstructLoca(self): + """Return reconstructed loca table data.""" + if "loca" not in self.ttFont: + # make sure glyf is reconstructed first + self.tables["glyf"].data = self.reconstructTable("glyf") + locaTable = self.ttFont["loca"] + data = locaTable.compile(self.ttFont) + if len(data) != self.tables["loca"].origLength: + raise TTLibError( + "reconstructed 'loca' table doesn't match original size: " + "expected %d, found %d" % (self.tables["loca"].origLength, len(data)) + ) + return data + + def _reconstructHmtx(self, data): + """Return reconstructed hmtx table data.""" + # Before reconstructing 'hmtx' table we need to parse other tables: + # 'glyf' is required for reconstructing the sidebearings from the glyphs' + # bounding box; 'hhea' is needed for the numberOfHMetrics field. 
+ if "glyf" in self.flavorData.transformedTables: + # transformed 'glyf' table is self-contained, thus 'loca' not needed + tableDependencies = ("maxp", "hhea", "glyf") + else: + # decompiling untransformed 'glyf' requires 'loca', which requires 'head' + tableDependencies = ("maxp", "head", "hhea", "loca", "glyf") + for tag in tableDependencies: + self._decompileTable(tag) + hmtxTable = self.ttFont["hmtx"] = WOFF2HmtxTable() + hmtxTable.reconstruct(data, self.ttFont) + data = hmtxTable.compile(self.ttFont) + return data + + def _decompileTable(self, tag): + """Decompile table data and store it inside self.ttFont.""" + data = self[tag] + if self.ttFont.isLoaded(tag): + return self.ttFont[tag] + tableClass = getTableClass(tag) + table = tableClass(tag) + self.ttFont.tables[tag] = table + table.decompile(data, self.ttFont) + + +class WOFF2Writer(SFNTWriter): + flavor = "woff2" + + def __init__( + self, + file, + numTables, + sfntVersion="\000\001\000\000", + flavor=None, + flavorData=None, + ): + if not haveBrotli: + log.error( + "The WOFF2 encoder requires the Brotli Python extension, available at: " + "https://github.com/google/brotli" + ) + raise ImportError("No module named brotli") + + self.file = file + self.numTables = numTables + self.sfntVersion = Tag(sfntVersion) + self.flavorData = WOFF2FlavorData(data=flavorData) + + self.directoryFormat = woff2DirectoryFormat + self.directorySize = woff2DirectorySize + self.DirectoryEntry = WOFF2DirectoryEntry + + self.signature = Tag("wOF2") + + self.nextTableOffset = 0 + self.transformBuffer = BytesIO() + + self.tables = OrderedDict() + + # make empty TTFont to store data while normalising and transforming tables + self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False) + + def __setitem__(self, tag, data): + """Associate new entry named 'tag' with raw table data.""" + if tag in self.tables: + raise TTLibError("cannot rewrite '%s' table" % tag) + if tag == "DSIG": + # always drop DSIG table, since the encoding 
process can invalidate it + self.numTables -= 1 + return + + entry = self.DirectoryEntry() + entry.tag = Tag(tag) + entry.flags = getKnownTagIndex(entry.tag) + # WOFF2 table data are written to disk only on close(), after all tags + # have been specified + entry.data = data + + self.tables[tag] = entry + + def close(self): + """All tags must have been specified. Now write the table data and directory.""" + if len(self.tables) != self.numTables: + raise TTLibError( + "wrong number of tables; expected %d, found %d" + % (self.numTables, len(self.tables)) + ) + + if self.sfntVersion in ("\x00\x01\x00\x00", "true"): + isTrueType = True + elif self.sfntVersion == "OTTO": + isTrueType = False + else: + raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)") + + # The WOFF2 spec no longer requires the glyph offsets to be 4-byte aligned. + # However, the reference WOFF2 implementation still fails to reconstruct + # 'unpadded' glyf tables, therefore we need to 'normalise' them. + # See: + # https://github.com/khaledhosny/ots/issues/60 + # https://github.com/google/woff2/issues/15 + if ( + isTrueType + and "glyf" in self.flavorData.transformedTables + and "glyf" in self.tables + ): + self._normaliseGlyfAndLoca(padding=4) + self._setHeadTransformFlag() + + # To pass the legacy OpenType Sanitiser currently included in browsers, + # we must sort the table directory and data alphabetically by tag. + # See: + # https://github.com/google/woff2/pull/3 + # https://lists.w3.org/Archives/Public/public-webfonts-wg/2015Mar/0000.html + # + # 2023: We rely on this in _transformTables where we expect that + # "loca" comes after "glyf" table. 
+ self.tables = OrderedDict(sorted(self.tables.items())) + + self.totalSfntSize = self._calcSFNTChecksumsLengthsAndOffsets() + + fontData = self._transformTables() + compressedFont = brotli.compress(fontData, mode=brotli.MODE_FONT) + + self.totalCompressedSize = len(compressedFont) + self.length = self._calcTotalSize() + self.majorVersion, self.minorVersion = self._getVersion() + self.reserved = 0 + + directory = self._packTableDirectory() + self.file.seek(0) + self.file.write(pad(directory + compressedFont, size=4)) + self._writeFlavorData() + + def _normaliseGlyfAndLoca(self, padding=4): + """Recompile glyf and loca tables, aligning glyph offsets to multiples of + 'padding' size. Update the head table's 'indexToLocFormat' accordingly while + compiling loca. + """ + if self.sfntVersion == "OTTO": + return + + for tag in ("maxp", "head", "loca", "glyf", "fvar"): + if tag in self.tables: + self._decompileTable(tag) + self.ttFont["glyf"].padding = padding + for tag in ("glyf", "loca"): + self._compileTable(tag) + + def _setHeadTransformFlag(self): + """Set bit 11 of 'head' table flags to indicate that the font has undergone + a lossless modifying transform. 
Re-compile head table data.""" + self._decompileTable("head") + self.ttFont["head"].flags |= 1 << 11 + self._compileTable("head") + + def _decompileTable(self, tag): + """Fetch table data, decompile it, and store it inside self.ttFont.""" + tag = Tag(tag) + if tag not in self.tables: + raise TTLibError("missing required table: %s" % tag) + if self.ttFont.isLoaded(tag): + return + data = self.tables[tag].data + if tag == "loca": + tableClass = WOFF2LocaTable + elif tag == "glyf": + tableClass = WOFF2GlyfTable + elif tag == "hmtx": + tableClass = WOFF2HmtxTable + else: + tableClass = getTableClass(tag) + table = tableClass(tag) + self.ttFont.tables[tag] = table + table.decompile(data, self.ttFont) + + def _compileTable(self, tag): + """Compile table and store it in its 'data' attribute.""" + self.tables[tag].data = self.ttFont[tag].compile(self.ttFont) + + def _calcSFNTChecksumsLengthsAndOffsets(self): + """Compute the 'original' SFNT checksums, lengths and offsets for checksum + adjustment calculation. Return the total size of the uncompressed font. + """ + offset = sfntDirectorySize + sfntDirectoryEntrySize * len(self.tables) + for tag, entry in self.tables.items(): + data = entry.data + entry.origOffset = offset + entry.origLength = len(data) + if tag == "head": + entry.checkSum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:]) + else: + entry.checkSum = calcChecksum(data) + offset += (entry.origLength + 3) & ~3 + return offset + + def _transformTables(self): + """Return transformed font data.""" + transformedTables = self.flavorData.transformedTables + for tag, entry in self.tables.items(): + data = None + if tag in transformedTables: + data = self.transformTable(tag) + if data is not None: + entry.transformed = True + if data is None: + if tag == "glyf": + # Currently we always sort table tags so + # 'loca' comes after 'glyf'. 
+ transformedTables.discard("loca") + # pass-through the table data without transformation + data = entry.data + entry.transformed = False + entry.offset = self.nextTableOffset + entry.saveData(self.transformBuffer, data) + self.nextTableOffset += entry.length + self.writeMasterChecksum() + fontData = self.transformBuffer.getvalue() + return fontData + + def transformTable(self, tag): + """Return transformed table data, or None if some pre-conditions aren't + met -- in which case, the non-transformed table data will be used. + """ + if tag == "loca": + data = b"" + elif tag == "glyf": + for tag in ("maxp", "head", "loca", "glyf"): + self._decompileTable(tag) + glyfTable = self.ttFont["glyf"] + data = glyfTable.transform(self.ttFont) + elif tag == "hmtx": + if "glyf" not in self.tables: + return + for tag in ("maxp", "head", "hhea", "loca", "glyf", "hmtx"): + self._decompileTable(tag) + hmtxTable = self.ttFont["hmtx"] + data = hmtxTable.transform(self.ttFont) # can be None + else: + raise TTLibError("Transform for table '%s' is unknown" % tag) + return data + + def _calcMasterChecksum(self): + """Calculate checkSumAdjustment.""" + tags = list(self.tables.keys()) + checksums = [] + for i in range(len(tags)): + checksums.append(self.tables[tags[i]].checkSum) + + # Create a SFNT directory for checksum calculation purposes + self.searchRange, self.entrySelector, self.rangeShift = getSearchRange( + self.numTables, 16 + ) + directory = sstruct.pack(sfntDirectoryFormat, self) + tables = sorted(self.tables.items()) + for tag, entry in tables: + sfntEntry = SFNTDirectoryEntry() + sfntEntry.tag = entry.tag + sfntEntry.checkSum = entry.checkSum + sfntEntry.offset = entry.origOffset + sfntEntry.length = entry.origLength + directory = directory + sfntEntry.toString() + + directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize + assert directory_end == len(directory) + + checksums.append(calcChecksum(directory)) + checksum = sum(checksums) & 0xFFFFFFFF + # 
BiboAfba! + checksumadjustment = (0xB1B0AFBA - checksum) & 0xFFFFFFFF + return checksumadjustment + + def writeMasterChecksum(self): + """Write checkSumAdjustment to the transformBuffer.""" + checksumadjustment = self._calcMasterChecksum() + self.transformBuffer.seek(self.tables["head"].offset + 8) + self.transformBuffer.write(struct.pack(">L", checksumadjustment)) + + def _calcTotalSize(self): + """Calculate total size of WOFF2 font, including any meta- and/or private data.""" + offset = self.directorySize + for entry in self.tables.values(): + offset += len(entry.toString()) + offset += self.totalCompressedSize + offset = (offset + 3) & ~3 + offset = self._calcFlavorDataOffsetsAndSize(offset) + return offset + + def _calcFlavorDataOffsetsAndSize(self, start): + """Calculate offsets and lengths for any meta- and/or private data.""" + offset = start + data = self.flavorData + if data.metaData: + self.metaOrigLength = len(data.metaData) + self.metaOffset = offset + self.compressedMetaData = brotli.compress( + data.metaData, mode=brotli.MODE_TEXT + ) + self.metaLength = len(self.compressedMetaData) + offset += self.metaLength + else: + self.metaOffset = self.metaLength = self.metaOrigLength = 0 + self.compressedMetaData = b"" + if data.privData: + # make sure private data is padded to 4-byte boundary + offset = (offset + 3) & ~3 + self.privOffset = offset + self.privLength = len(data.privData) + offset += self.privLength + else: + self.privOffset = self.privLength = 0 + return offset + + def _getVersion(self): + """Return the WOFF2 font's (majorVersion, minorVersion) tuple.""" + data = self.flavorData + if data.majorVersion is not None and data.minorVersion is not None: + return data.majorVersion, data.minorVersion + else: + # if None, return 'fontRevision' from 'head' table + if "head" in self.tables: + return struct.unpack(">HH", self.tables["head"].data[4:8]) + else: + return 0, 0 + + def _packTableDirectory(self): + """Return WOFF2 table directory data.""" + 
directory = sstruct.pack(self.directoryFormat, self) + for entry in self.tables.values(): + directory = directory + entry.toString() + return directory + + def _writeFlavorData(self): + """Write metadata and/or private data using appropiate padding.""" + compressedMetaData = self.compressedMetaData + privData = self.flavorData.privData + if compressedMetaData and privData: + compressedMetaData = pad(compressedMetaData, size=4) + if compressedMetaData: + self.file.seek(self.metaOffset) + assert self.file.tell() == self.metaOffset + self.file.write(compressedMetaData) + if privData: + self.file.seek(self.privOffset) + assert self.file.tell() == self.privOffset + self.file.write(privData) + + def reordersTables(self): + return True + + +# -- woff2 directory helpers and cruft + +woff2DirectoryFormat = """ + > # big endian + signature: 4s # "wOF2" + sfntVersion: 4s + length: L # total woff2 file size + numTables: H # number of tables + reserved: H # set to 0 + totalSfntSize: L # uncompressed size + totalCompressedSize: L # compressed size + majorVersion: H # major version of WOFF file + minorVersion: H # minor version of WOFF file + metaOffset: L # offset to metadata block + metaLength: L # length of compressed metadata + metaOrigLength: L # length of uncompressed metadata + privOffset: L # offset to private data block + privLength: L # length of private data block +""" + +woff2DirectorySize = sstruct.calcsize(woff2DirectoryFormat) + +woff2KnownTags = ( + "cmap", + "head", + "hhea", + "hmtx", + "maxp", + "name", + "OS/2", + "post", + "cvt ", + "fpgm", + "glyf", + "loca", + "prep", + "CFF ", + "VORG", + "EBDT", + "EBLC", + "gasp", + "hdmx", + "kern", + "LTSH", + "PCLT", + "VDMX", + "vhea", + "vmtx", + "BASE", + "GDEF", + "GPOS", + "GSUB", + "EBSC", + "JSTF", + "MATH", + "CBDT", + "CBLC", + "COLR", + "CPAL", + "SVG ", + "sbix", + "acnt", + "avar", + "bdat", + "bloc", + "bsln", + "cvar", + "fdsc", + "feat", + "fmtx", + "fvar", + "gvar", + "hsty", + "just", + "lcar", + 
"mort", + "morx", + "opbd", + "prop", + "trak", + "Zapf", + "Silf", + "Glat", + "Gloc", + "Feat", + "Sill", +) + +woff2FlagsFormat = """ + > # big endian + flags: B # table type and flags +""" + +woff2FlagsSize = sstruct.calcsize(woff2FlagsFormat) + +woff2UnknownTagFormat = """ + > # big endian + tag: 4s # 4-byte tag (optional) +""" + +woff2UnknownTagSize = sstruct.calcsize(woff2UnknownTagFormat) + +woff2UnknownTagIndex = 0x3F + +woff2Base128MaxSize = 5 +woff2DirectoryEntryMaxSize = ( + woff2FlagsSize + woff2UnknownTagSize + 2 * woff2Base128MaxSize +) + +woff2TransformedTableTags = ("glyf", "loca") + +woff2GlyfTableFormat = """ + > # big endian + version: H # = 0x0000 + optionFlags: H # Bit 0: we have overlapSimpleBitmap[], Bits 1-15: reserved + numGlyphs: H # Number of glyphs + indexFormat: H # Offset format for loca table + nContourStreamSize: L # Size of nContour stream + nPointsStreamSize: L # Size of nPoints stream + flagStreamSize: L # Size of flag stream + glyphStreamSize: L # Size of glyph stream + compositeStreamSize: L # Size of composite stream + bboxStreamSize: L # Comnined size of bboxBitmap and bboxStream + instructionStreamSize: L # Size of instruction stream +""" + +woff2GlyfTableFormatSize = sstruct.calcsize(woff2GlyfTableFormat) + +bboxFormat = """ + > # big endian + xMin: h + yMin: h + xMax: h + yMax: h +""" + +woff2OverlapSimpleBitmapFlag = 0x0001 + + +def getKnownTagIndex(tag): + """Return index of 'tag' in woff2KnownTags list. 
Return 63 if not found.""" + for i in range(len(woff2KnownTags)): + if tag == woff2KnownTags[i]: + return i + return woff2UnknownTagIndex + + +class WOFF2DirectoryEntry(DirectoryEntry): + def fromFile(self, file): + pos = file.tell() + data = file.read(woff2DirectoryEntryMaxSize) + left = self.fromString(data) + consumed = len(data) - len(left) + file.seek(pos + consumed) + + def fromString(self, data): + if len(data) < 1: + raise TTLibError("can't read table 'flags': not enough data") + dummy, data = sstruct.unpack2(woff2FlagsFormat, data, self) + if self.flags & 0x3F == 0x3F: + # if bits [0..5] of the flags byte == 63, read a 4-byte arbitrary tag value + if len(data) < woff2UnknownTagSize: + raise TTLibError("can't read table 'tag': not enough data") + dummy, data = sstruct.unpack2(woff2UnknownTagFormat, data, self) + else: + # otherwise, tag is derived from a fixed 'Known Tags' table + self.tag = woff2KnownTags[self.flags & 0x3F] + self.tag = Tag(self.tag) + self.origLength, data = unpackBase128(data) + self.length = self.origLength + if self.transformed: + self.length, data = unpackBase128(data) + if self.tag == "loca" and self.length != 0: + raise TTLibError("the transformLength of the 'loca' table must be 0") + # return left over data + return data + + def toString(self): + data = bytechr(self.flags) + if (self.flags & 0x3F) == 0x3F: + data += struct.pack(">4s", self.tag.tobytes()) + data += packBase128(self.origLength) + if self.transformed: + data += packBase128(self.length) + return data + + @property + def transformVersion(self): + """Return bits 6-7 of table entry's flags, which indicate the preprocessing + transformation version number (between 0 and 3). 
+ """ + return self.flags >> 6 + + @transformVersion.setter + def transformVersion(self, value): + assert 0 <= value <= 3 + self.flags |= value << 6 + + @property + def transformed(self): + """Return True if the table has any transformation, else return False.""" + # For all tables in a font, except for 'glyf' and 'loca', the transformation + # version 0 indicates the null transform (where the original table data is + # passed directly to the Brotli compressor). For 'glyf' and 'loca' tables, + # transformation version 3 indicates the null transform + if self.tag in {"glyf", "loca"}: + return self.transformVersion != 3 + else: + return self.transformVersion != 0 + + @transformed.setter + def transformed(self, booleanValue): + # here we assume that a non-null transform means version 0 for 'glyf' and + # 'loca' and 1 for every other table (e.g. hmtx); but that may change as + # new transformation formats are introduced in the future (if ever). + if self.tag in {"glyf", "loca"}: + self.transformVersion = 3 if not booleanValue else 0 + else: + self.transformVersion = int(booleanValue) + + +class WOFF2LocaTable(getTableClass("loca")): + """Same as parent class. The only difference is that it attempts to preserve + the 'indexFormat' as encoded in the WOFF2 glyf table. 
+ """ + + def __init__(self, tag=None): + self.tableTag = Tag(tag or "loca") + + def compile(self, ttFont): + try: + max_location = max(self.locations) + except AttributeError: + self.set([]) + max_location = 0 + if "glyf" in ttFont and hasattr(ttFont["glyf"], "indexFormat"): + # copile loca using the indexFormat specified in the WOFF2 glyf table + indexFormat = ttFont["glyf"].indexFormat + if indexFormat == 0: + if max_location >= 0x20000: + raise TTLibError("indexFormat is 0 but local offsets > 0x20000") + if not all(l % 2 == 0 for l in self.locations): + raise TTLibError( + "indexFormat is 0 but local offsets not multiples of 2" + ) + locations = array.array("H") + for i in range(len(self.locations)): + locations.append(self.locations[i] // 2) + else: + locations = array.array("I", self.locations) + if sys.byteorder != "big": + locations.byteswap() + data = locations.tobytes() + else: + # use the most compact indexFormat given the current glyph offsets + data = super(WOFF2LocaTable, self).compile(ttFont) + return data + + +class WOFF2GlyfTable(getTableClass("glyf")): + """Decoder/Encoder for WOFF2 'glyf' table transform.""" + + subStreams = ( + "nContourStream", + "nPointsStream", + "flagStream", + "glyphStream", + "compositeStream", + "bboxStream", + "instructionStream", + ) + + def __init__(self, tag=None): + self.tableTag = Tag(tag or "glyf") + + def reconstruct(self, data, ttFont): + """Decompile transformed 'glyf' data.""" + inputDataSize = len(data) + + if inputDataSize < woff2GlyfTableFormatSize: + raise TTLibError("not enough 'glyf' data") + dummy, data = sstruct.unpack2(woff2GlyfTableFormat, data, self) + offset = woff2GlyfTableFormatSize + + for stream in self.subStreams: + size = getattr(self, stream + "Size") + setattr(self, stream, data[:size]) + data = data[size:] + offset += size + + hasOverlapSimpleBitmap = self.optionFlags & woff2OverlapSimpleBitmapFlag + self.overlapSimpleBitmap = None + if hasOverlapSimpleBitmap: + overlapSimpleBitmapSize = 
(self.numGlyphs + 7) >> 3 + self.overlapSimpleBitmap = array.array("B", data[:overlapSimpleBitmapSize]) + offset += overlapSimpleBitmapSize + + if offset != inputDataSize: + raise TTLibError( + "incorrect size of transformed 'glyf' table: expected %d, received %d bytes" + % (offset, inputDataSize) + ) + + bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2 + bboxBitmap = self.bboxStream[:bboxBitmapSize] + self.bboxBitmap = array.array("B", bboxBitmap) + self.bboxStream = self.bboxStream[bboxBitmapSize:] + + self.nContourStream = array.array("h", self.nContourStream) + if sys.byteorder != "big": + self.nContourStream.byteswap() + assert len(self.nContourStream) == self.numGlyphs + + if "head" in ttFont: + ttFont["head"].indexToLocFormat = self.indexFormat + try: + self.glyphOrder = ttFont.getGlyphOrder() + except: + self.glyphOrder = None + if self.glyphOrder is None: + self.glyphOrder = [".notdef"] + self.glyphOrder.extend(["glyph%.5d" % i for i in range(1, self.numGlyphs)]) + else: + if len(self.glyphOrder) != self.numGlyphs: + raise TTLibError( + "incorrect glyphOrder: expected %d glyphs, found %d" + % (len(self.glyphOrder), self.numGlyphs) + ) + + glyphs = self.glyphs = {} + for glyphID, glyphName in enumerate(self.glyphOrder): + glyph = self._decodeGlyph(glyphID) + glyphs[glyphName] = glyph + + def transform(self, ttFont): + """Return transformed 'glyf' data""" + self.numGlyphs = len(self.glyphs) + assert len(self.glyphOrder) == self.numGlyphs + if "maxp" in ttFont: + ttFont["maxp"].numGlyphs = self.numGlyphs + self.indexFormat = ttFont["head"].indexToLocFormat + + for stream in self.subStreams: + setattr(self, stream, b"") + bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2 + self.bboxBitmap = array.array("B", [0] * bboxBitmapSize) + + self.overlapSimpleBitmap = array.array("B", [0] * ((self.numGlyphs + 7) >> 3)) + for glyphID in range(self.numGlyphs): + try: + self._encodeGlyph(glyphID) + except NotImplementedError: + return None + hasOverlapSimpleBitmap = 
any(self.overlapSimpleBitmap) + + self.bboxStream = self.bboxBitmap.tobytes() + self.bboxStream + for stream in self.subStreams: + setattr(self, stream + "Size", len(getattr(self, stream))) + self.version = 0 + self.optionFlags = 0 + if hasOverlapSimpleBitmap: + self.optionFlags |= woff2OverlapSimpleBitmapFlag + data = sstruct.pack(woff2GlyfTableFormat, self) + data += bytesjoin([getattr(self, s) for s in self.subStreams]) + if hasOverlapSimpleBitmap: + data += self.overlapSimpleBitmap.tobytes() + return data + + def _decodeGlyph(self, glyphID): + glyph = getTableModule("glyf").Glyph() + glyph.numberOfContours = self.nContourStream[glyphID] + if glyph.numberOfContours == 0: + return glyph + elif glyph.isComposite(): + self._decodeComponents(glyph) + else: + self._decodeCoordinates(glyph) + self._decodeOverlapSimpleFlag(glyph, glyphID) + self._decodeBBox(glyphID, glyph) + return glyph + + def _decodeComponents(self, glyph): + data = self.compositeStream + glyph.components = [] + more = 1 + haveInstructions = 0 + while more: + component = getTableModule("glyf").GlyphComponent() + more, haveInstr, data = component.decompile(data, self) + haveInstructions = haveInstructions | haveInstr + glyph.components.append(component) + self.compositeStream = data + if haveInstructions: + self._decodeInstructions(glyph) + + def _decodeCoordinates(self, glyph): + data = self.nPointsStream + endPtsOfContours = [] + endPoint = -1 + for i in range(glyph.numberOfContours): + ptsOfContour, data = unpack255UShort(data) + endPoint += ptsOfContour + endPtsOfContours.append(endPoint) + glyph.endPtsOfContours = endPtsOfContours + self.nPointsStream = data + self._decodeTriplets(glyph) + self._decodeInstructions(glyph) + + def _decodeOverlapSimpleFlag(self, glyph, glyphID): + if self.overlapSimpleBitmap is None or glyph.numberOfContours <= 0: + return + byte = glyphID >> 3 + bit = glyphID & 7 + if self.overlapSimpleBitmap[byte] & (0x80 >> bit): + glyph.flags[0] |= _g_l_y_f.flagOverlapSimple + 
+ def _decodeInstructions(self, glyph): + glyphStream = self.glyphStream + instructionStream = self.instructionStream + instructionLength, glyphStream = unpack255UShort(glyphStream) + glyph.program = ttProgram.Program() + glyph.program.fromBytecode(instructionStream[:instructionLength]) + self.glyphStream = glyphStream + self.instructionStream = instructionStream[instructionLength:] + + def _decodeBBox(self, glyphID, glyph): + haveBBox = bool(self.bboxBitmap[glyphID >> 3] & (0x80 >> (glyphID & 7))) + if glyph.isComposite() and not haveBBox: + raise TTLibError("no bbox values for composite glyph %d" % glyphID) + if haveBBox: + dummy, self.bboxStream = sstruct.unpack2(bboxFormat, self.bboxStream, glyph) + else: + glyph.recalcBounds(self) + + def _decodeTriplets(self, glyph): + def withSign(flag, baseval): + assert 0 <= baseval and baseval < 65536, "integer overflow" + return baseval if flag & 1 else -baseval + + nPoints = glyph.endPtsOfContours[-1] + 1 + flagSize = nPoints + if flagSize > len(self.flagStream): + raise TTLibError("not enough 'flagStream' data") + flagsData = self.flagStream[:flagSize] + self.flagStream = self.flagStream[flagSize:] + flags = array.array("B", flagsData) + + triplets = array.array("B", self.glyphStream) + nTriplets = len(triplets) + assert nPoints <= nTriplets + + x = 0 + y = 0 + glyph.coordinates = getTableModule("glyf").GlyphCoordinates.zeros(nPoints) + glyph.flags = array.array("B") + tripletIndex = 0 + for i in range(nPoints): + flag = flags[i] + onCurve = not bool(flag >> 7) + flag &= 0x7F + if flag < 84: + nBytes = 1 + elif flag < 120: + nBytes = 2 + elif flag < 124: + nBytes = 3 + else: + nBytes = 4 + assert (tripletIndex + nBytes) <= nTriplets + if flag < 10: + dx = 0 + dy = withSign(flag, ((flag & 14) << 7) + triplets[tripletIndex]) + elif flag < 20: + dx = withSign(flag, (((flag - 10) & 14) << 7) + triplets[tripletIndex]) + dy = 0 + elif flag < 84: + b0 = flag - 20 + b1 = triplets[tripletIndex] + dx = withSign(flag, 1 + (b0 & 
0x30) + (b1 >> 4)) + dy = withSign(flag >> 1, 1 + ((b0 & 0x0C) << 2) + (b1 & 0x0F)) + elif flag < 120: + b0 = flag - 84 + dx = withSign(flag, 1 + ((b0 // 12) << 8) + triplets[tripletIndex]) + dy = withSign( + flag >> 1, 1 + (((b0 % 12) >> 2) << 8) + triplets[tripletIndex + 1] + ) + elif flag < 124: + b2 = triplets[tripletIndex + 1] + dx = withSign(flag, (triplets[tripletIndex] << 4) + (b2 >> 4)) + dy = withSign( + flag >> 1, ((b2 & 0x0F) << 8) + triplets[tripletIndex + 2] + ) + else: + dx = withSign( + flag, (triplets[tripletIndex] << 8) + triplets[tripletIndex + 1] + ) + dy = withSign( + flag >> 1, + (triplets[tripletIndex + 2] << 8) + triplets[tripletIndex + 3], + ) + tripletIndex += nBytes + x += dx + y += dy + glyph.coordinates[i] = (x, y) + glyph.flags.append(int(onCurve)) + bytesConsumed = tripletIndex + self.glyphStream = self.glyphStream[bytesConsumed:] + + def _encodeGlyph(self, glyphID): + glyphName = self.getGlyphName(glyphID) + glyph = self[glyphName] + self.nContourStream += struct.pack(">h", glyph.numberOfContours) + if glyph.numberOfContours == 0: + return + elif glyph.isComposite(): + self._encodeComponents(glyph) + else: + self._encodeCoordinates(glyph) + self._encodeOverlapSimpleFlag(glyph, glyphID) + self._encodeBBox(glyphID, glyph) + + def _encodeComponents(self, glyph): + lastcomponent = len(glyph.components) - 1 + more = 1 + haveInstructions = 0 + for i in range(len(glyph.components)): + if i == lastcomponent: + haveInstructions = hasattr(glyph, "program") + more = 0 + component = glyph.components[i] + self.compositeStream += component.compile(more, haveInstructions, self) + if haveInstructions: + self._encodeInstructions(glyph) + + def _encodeCoordinates(self, glyph): + lastEndPoint = -1 + if _g_l_y_f.flagCubic in glyph.flags: + raise NotImplementedError + for endPoint in glyph.endPtsOfContours: + ptsOfContour = endPoint - lastEndPoint + self.nPointsStream += pack255UShort(ptsOfContour) + lastEndPoint = endPoint + self._encodeTriplets(glyph) 
+ self._encodeInstructions(glyph) + + def _encodeOverlapSimpleFlag(self, glyph, glyphID): + if glyph.numberOfContours <= 0: + return + if glyph.flags[0] & _g_l_y_f.flagOverlapSimple: + byte = glyphID >> 3 + bit = glyphID & 7 + self.overlapSimpleBitmap[byte] |= 0x80 >> bit + + def _encodeInstructions(self, glyph): + instructions = glyph.program.getBytecode() + self.glyphStream += pack255UShort(len(instructions)) + self.instructionStream += instructions + + def _encodeBBox(self, glyphID, glyph): + assert glyph.numberOfContours != 0, "empty glyph has no bbox" + if not glyph.isComposite(): + # for simple glyphs, compare the encoded bounding box info with the calculated + # values, and if they match omit the bounding box info + currentBBox = glyph.xMin, glyph.yMin, glyph.xMax, glyph.yMax + calculatedBBox = calcIntBounds(glyph.coordinates) + if currentBBox == calculatedBBox: + return + self.bboxBitmap[glyphID >> 3] |= 0x80 >> (glyphID & 7) + self.bboxStream += sstruct.pack(bboxFormat, glyph) + + def _encodeTriplets(self, glyph): + assert len(glyph.coordinates) == len(glyph.flags) + coordinates = glyph.coordinates.copy() + coordinates.absoluteToRelative() + + flags = array.array("B") + triplets = array.array("B") + for i in range(len(coordinates)): + onCurve = glyph.flags[i] & _g_l_y_f.flagOnCurve + x, y = coordinates[i] + absX = abs(x) + absY = abs(y) + onCurveBit = 0 if onCurve else 128 + xSignBit = 0 if (x < 0) else 1 + ySignBit = 0 if (y < 0) else 1 + xySignBits = xSignBit + 2 * ySignBit + + if x == 0 and absY < 1280: + flags.append(onCurveBit + ((absY & 0xF00) >> 7) + ySignBit) + triplets.append(absY & 0xFF) + elif y == 0 and absX < 1280: + flags.append(onCurveBit + 10 + ((absX & 0xF00) >> 7) + xSignBit) + triplets.append(absX & 0xFF) + elif absX < 65 and absY < 65: + flags.append( + onCurveBit + + 20 + + ((absX - 1) & 0x30) + + (((absY - 1) & 0x30) >> 2) + + xySignBits + ) + triplets.append((((absX - 1) & 0xF) << 4) | ((absY - 1) & 0xF)) + elif absX < 769 and absY < 
    def reconstruct(self, data, ttFont):
        """Decompile transformed WOFF2 'hmtx' data back into per-glyph metrics.

        The transformed table starts with a one-byte flags field; bits 0 and 1
        indicate that the lsb[] (proportional glyphs) and leftSideBearing[]
        (monospaced glyphs) arrays, respectively, were *omitted* and must be
        recomputed from each glyph's xMin. Requires the 'glyf' and 'hhea'
        tables of *ttFont* to be already available, since glyph order, glyph
        outlines and numberOfHMetrics are read from them.

        Populates ``self.metrics`` mapping glyph name -> (advanceWidth, lsb).

        Raises:
            TTLibError: if reserved flag bits are set, if both sidebearing
                arrays are declared present, or if trailing data remains.
        """
        (flags,) = struct.unpack(">B", data[:1])
        data = data[1:]
        if flags & 0b11111100 != 0:
            raise TTLibError("Bits 2-7 of '%s' flags are reserved" % self.tableTag)

        # When bit 0 is _not_ set, the lsb[] array is present
        hasLsbArray = flags & 1 == 0
        # When bit 1 is _not_ set, the leftSideBearing[] array is present
        hasLeftSideBearingArray = flags & 2 == 0
        if hasLsbArray and hasLeftSideBearingArray:
            # At least one of the arrays must have been elided by the encoder.
            raise TTLibError(
                "either bits 0 or 1 (or both) must set in transformed '%s' flags"
                % self.tableTag
            )

        glyfTable = ttFont["glyf"]
        headerTable = ttFont["hhea"]
        glyphOrder = glyfTable.glyphOrder
        numGlyphs = len(glyphOrder)
        # Clamp to numGlyphs so a bogus hhea value can't over-read the data.
        numberOfHMetrics = min(int(headerTable.numberOfHMetrics), numGlyphs)

        # advanceWidth[] is always present: one uint16 per hMetric.
        assert len(data) >= 2 * numberOfHMetrics
        advanceWidthArray = array.array("H", data[: 2 * numberOfHMetrics])
        if sys.byteorder != "big":
            # array.array reads native-endian; table data is big-endian.
            advanceWidthArray.byteswap()
        data = data[2 * numberOfHMetrics :]

        if hasLsbArray:
            # lsb[] stored explicitly: one int16 per hMetric.
            assert len(data) >= 2 * numberOfHMetrics
            lsbArray = array.array("h", data[: 2 * numberOfHMetrics])
            if sys.byteorder != "big":
                lsbArray.byteswap()
            data = data[2 * numberOfHMetrics :]
        else:
            # compute (proportional) glyphs' lsb from their xMin
            lsbArray = array.array("h")
            for i, glyphName in enumerate(glyphOrder):
                if i >= numberOfHMetrics:
                    break
                glyph = glyfTable[glyphName]
                # Empty glyphs have no xMin attribute; their lsb is 0.
                xMin = getattr(glyph, "xMin", 0)
                lsbArray.append(xMin)

        # Glyphs past numberOfHMetrics share the last advance width and only
        # store (or recompute) a left sidebearing.
        numberOfSideBearings = numGlyphs - numberOfHMetrics
        if hasLeftSideBearingArray:
            assert len(data) >= 2 * numberOfSideBearings
            leftSideBearingArray = array.array("h", data[: 2 * numberOfSideBearings])
            if sys.byteorder != "big":
                leftSideBearingArray.byteswap()
            data = data[2 * numberOfSideBearings :]
        else:
            # compute (monospaced) glyphs' leftSideBearing from their xMin
            leftSideBearingArray = array.array("h")
            for i, glyphName in enumerate(glyphOrder):
                if i < numberOfHMetrics:
                    continue
                glyph = glyfTable[glyphName]
                xMin = getattr(glyph, "xMin", 0)
                leftSideBearingArray.append(xMin)

        if data:
            # All bytes must be consumed exactly.
            raise TTLibError("too much '%s' table data" % self.tableTag)

        # Assemble the metrics dict in glyph order: full (aw, lsb) pairs
        # first, then the trailing run that reuses the last advance width.
        self.metrics = {}
        for i in range(numberOfHMetrics):
            glyphName = glyphOrder[i]
            advanceWidth, lsb = advanceWidthArray[i], lsbArray[i]
            self.metrics[glyphName] = (advanceWidth, lsb)
        lastAdvance = advanceWidthArray[-1]
        for i in range(numberOfSideBearings):
            glyphName = glyphOrder[i + numberOfHMetrics]
            self.metrics[glyphName] = (lastAdvance, leftSideBearingArray[i])
class WOFF2FlavorData(WOFFFlavorData):
    """Flavor data for WOFF2 fonts: header versions, metadata/private data,
    and the set of table tags that are (or will be) transformed."""

    Flavor = "woff2"

    def __init__(self, reader=None, data=None, transformedTables=None):
        """Data class that holds the WOFF2 header major/minor version, any
        metadata or private data (as bytes strings), and the set of
        table tags that have transformations applied (if reader is not None),
        or will have once the WOFF2 font is compiled.

        Args:
            reader: an SFNTReader (or subclass) object to read flavor data from.
            data: another WOFFFlavorData object to initialise data from.
            transformedTables: set of strings containing table tags to be transformed.

        Raises:
            ImportError if the brotli module is not installed.

        NOTE: The 'reader' argument, on the one hand, and the 'data' and
        'transformedTables' arguments, on the other hand, are mutually exclusive.
        """
        if not haveBrotli:
            raise ImportError("No module named brotli")

        if reader is not None:
            if data is not None:
                raise TypeError("'reader' and 'data' arguments are mutually exclusive")
            if transformedTables is not None:
                raise TypeError(
                    "'reader' and 'transformedTables' arguments are mutually exclusive"
                )

        # 'glyf' and 'loca' are interdependent: transforming one without the
        # other would produce an inconsistent font.
        if transformedTables is not None and (
            "glyf" in transformedTables
            and "loca" not in transformedTables
            or "loca" in transformedTables
            and "glyf" not in transformedTables
        ):
            raise ValueError("'glyf' and 'loca' must be transformed (or not) together")
        super(WOFF2FlavorData, self).__init__(reader=reader)
        if reader:
            transformedTables = [
                tag for tag, entry in reader.tables.items() if entry.transformed
            ]
        elif data:
            self.majorVersion = data.majorVersion
            # BUGFIX: this previously re-assigned self.majorVersion with the
            # minor version, clobbering the major version and leaving
            # self.minorVersion at its default.
            self.minorVersion = data.minorVersion
            self.metaData = data.metaData
            self.privData = data.privData
            if transformedTables is None and hasattr(data, "transformedTables"):
                transformedTables = data.transformedTables

        if transformedTables is None:
            # Default: transform 'glyf' and 'loca' only.
            transformedTables = woff2TransformedTableTags

        self.transformedTables = set(transformedTables)

    def _decompress(self, rawData):
        # WOFF2 always uses Brotli for the compressed data stream.
        return brotli.decompress(rawData)
def base128Size(n):
    """Return the length in bytes of a UIntBase128-encoded sequence with value n.

    Each byte of the encoding carries 7 payload bits, so the size is the
    number of 7-bit groups needed to represent n (at least one).

    >>> base128Size(0)
    1
    >>> base128Size(24567)
    3
    >>> base128Size(2**32-1)
    5
    """
    assert n >= 0
    size = 1
    n >>= 7
    # Count one extra byte for every additional 7-bit group.
    while n:
        size += 1
        n >>= 7
    return size
def pack255UShort(value):
    r"""Encode unsigned integer in range 0 to 65535 (inclusive) to a bytestring
    using 255UInt16 variable-length encoding. Produce the shortest possible
    encoding.

    One byte encodes 0..252 directly; prefix 255 covers 253..505 and prefix
    254 covers 506..761, each with one extra byte; prefix 253 is followed by
    the full big-endian uint16 for everything else.

    >>> pack255UShort(252) == b'\xfc'
    True
    >>> pack255UShort(506) == b'\xfe\x00'
    True
    >>> pack255UShort(762) == b'\xfd\x02\xfa'
    True
    """
    if value < 0 or value > 0xFFFF:
        raise TTLibError("255UInt16 format requires 0 <= integer <= 65535")
    if value < 253:
        return bytes([value])
    if value < 506:
        return bytes([255, value - 253])
    if value < 762:
        return bytes([254, value - 506])
    # Worst case: three bytes, code 253 followed by the raw uint16.
    return struct.pack(">BH", 253, value)
def decompress(input_file, output_file):
    """Decompress WOFF2 font to OpenType font.

    Args:
        input_file: a file path, file or file-like object (open in binary mode)
            containing a compressed WOFF2 font.
        output_file: a file path, file or file-like object where to save the
            decompressed OpenType font.
    """
    log.info("Processing %s => %s" % (input_file, output_file))

    # Don't recompute bboxes/timestamp: keep the decompressed font
    # byte-faithful to the original data.
    font = TTFont(input_file, recalcBBoxes=False, recalcTimestamp=False)
    # Dropping flavor and flavorData makes save() emit a plain sfnt
    # (TTF/OTF) instead of re-wrapping as WOFF/WOFF2.
    font.flavor = None
    font.flavorData = None
    # reorderTables=True restores the spec-recommended table order, which
    # WOFF2 packing may have changed.
    font.save(output_file, reorderTables=True)
namespace.transform_tables.add("hmtx") + + parser = argparse.ArgumentParser( + prog="fonttools ttLib.woff2", description=main.__doc__, add_help=False + ) + + parser.add_argument( + "-h", "--help", action=_HelpAction, help="show this help message and exit" + ) + + parser_group = parser.add_subparsers(title="sub-commands") + parser_compress = parser_group.add_parser( + "compress", description="Compress a TTF or OTF font to WOFF2" + ) + parser_decompress = parser_group.add_parser( + "decompress", description="Decompress a WOFF2 font to OTF" + ) + + for subparser in (parser_compress, parser_decompress): + group = subparser.add_mutually_exclusive_group(required=False) + group.add_argument( + "-v", + "--verbose", + action="store_true", + help="print more messages to console", + ) + group.add_argument( + "-q", + "--quiet", + action="store_true", + help="do not print messages to console", + ) + + parser_compress.add_argument( + "input_file", + metavar="INPUT", + help="the input OpenType font (.ttf or .otf)", + ) + parser_decompress.add_argument( + "input_file", + metavar="INPUT", + help="the input WOFF2 font", + ) + + parser_compress.add_argument( + "-o", + "--output-file", + metavar="OUTPUT", + help="the output WOFF2 font", + ) + parser_decompress.add_argument( + "-o", + "--output-file", + metavar="OUTPUT", + help="the output OpenType font", + ) + + transform_group = parser_compress.add_argument_group() + transform_group.add_argument( + "--no-glyf-transform", + dest="transform_tables", + nargs=0, + action=_NoGlyfTransformAction, + help="Do not transform glyf (and loca) tables", + ) + transform_group.add_argument( + "--hmtx-transform", + dest="transform_tables", + nargs=0, + action=_HmtxTransformAction, + help="Enable optional transformation for 'hmtx' table", + ) + + parser_compress.set_defaults( + subcommand=compress, + transform_tables={"glyf", "loca"}, + ) + parser_decompress.set_defaults(subcommand=decompress) + + options = vars(parser.parse_args(args)) + + subcommand 
= options.pop("subcommand", None) + if not subcommand: + parser.print_help() + return + + quiet = options.pop("quiet") + verbose = options.pop("verbose") + configLogger( + level=("ERROR" if quiet else "DEBUG" if verbose else "INFO"), + ) + + if not options["output_file"]: + if subcommand is compress: + extension = ".woff2" + elif subcommand is decompress: + # choose .ttf/.otf file extension depending on sfntVersion + with open(options["input_file"], "rb") as f: + f.seek(4) # skip 'wOF2' signature + sfntVersion = f.read(4) + assert len(sfntVersion) == 4, "not enough data" + extension = ".otf" if sfntVersion == b"OTTO" else ".ttf" + else: + raise AssertionError(subcommand) + options["output_file"] = makeOutputFileName( + options["input_file"], outputDir=None, extension=extension + ) + + try: + subcommand(**options) + except TTLibError as e: + parser.error(e) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttx.py b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttx.py new file mode 100644 index 0000000000000000000000000000000000000000..0adda52d7422960a45ca4148bab75900fb9f87e9 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/fontTools/ttx.py @@ -0,0 +1,467 @@ +"""\ +usage: ttx [options] inputfile1 [... inputfileN] + +TTX -- From OpenType To XML And Back + +If an input file is a TrueType or OpenType font file, it will be +decompiled to a TTX file (an XML-based text format). +If an input file is a TTX file, it will be compiled to whatever +format the data is in, a TrueType or OpenType/CFF font file. +A special input value of - means read from the standard input. + +Output files are created so they are unique: an existing file is +never overwritten. + +General options +=============== + +-h Help print this message. +--version show version and exit. +-d <outputfolder> Specify a directory where the output files are + to be created. +-o <outputfile> Specify a file to write the output to. 
A special + value of - would use the standard output. +-f Overwrite existing output file(s), ie. don't append + numbers. +-v Verbose: more messages will be written to stdout + about what is being done. +-q Quiet: No messages will be written to stdout about + what is being done. +-a allow virtual glyphs ID's on compile or decompile. + +Dump options +============ + +-l List table info: instead of dumping to a TTX file, list + some minimal info about each table. +-t <table> Specify a table to dump. Multiple -t options + are allowed. When no -t option is specified, all tables + will be dumped. +-x <table> Specify a table to exclude from the dump. Multiple + -x options are allowed. -t and -x are mutually exclusive. +-s Split tables: save the TTX data into separate TTX files per + table and write one small TTX file that contains references + to the individual table dumps. This file can be used as + input to ttx, as long as the table files are in the + same directory. +-g Split glyf table: Save the glyf data into separate TTX files + per glyph and write a small TTX for the glyf table which + contains references to the individual TTGlyph elements. + NOTE: specifying -g implies -s (no need for -s together + with -g) +-i Do NOT disassemble TT instructions: when this option is + given, all TrueType programs (glyph programs, the font + program and the pre-program) will be written to the TTX + file as hex data instead of assembly. This saves some time + and makes the TTX file smaller. +-z <format> Specify a bitmap data export option for EBDT: + {'raw', 'row', 'bitwise', 'extfile'} or for the CBDT: + {'raw', 'extfile'} Each option does one of the following: + + -z raw + export the bitmap data as a hex dump + -z row + export each row as hex data + -z bitwise + export each row as binary in an ASCII art style + -z extfile + export the data as external files with XML references + + If no export format is specified 'raw' format is used. 
+-e Don't ignore decompilation errors, but show a full traceback + and abort. +-y <number> Select font number for TrueType Collection (.ttc/.otc), + starting from 0. +--unicodedata <UnicodeData.txt> + Use custom database file to write character names in the + comments of the cmap TTX output. +--newline <value> + Control how line endings are written in the XML file. It + can be 'LF', 'CR', or 'CRLF'. If not specified, the + default platform-specific line endings are used. + +Compile options +=============== + +-m Merge with TrueType-input-file: specify a TrueType or + OpenType font file to be merged with the TTX file. This + option is only valid when at most one TTX file is specified. +-b Don't recalc glyph bounding boxes: use the values in the + TTX file as-is. +--recalc-timestamp + Set font 'modified' timestamp to current time. + By default, the modification time of the TTX file will be + used. +--no-recalc-timestamp + Keep the original font 'modified' timestamp. +--flavor <type> + Specify flavor of output font file. May be 'woff' or 'woff2'. + Note that WOFF2 requires the Brotli Python extension, + available at https://github.com/google/brotli +--with-zopfli + Use Zopfli instead of Zlib to compress WOFF. 
class Options(object):
    """Parsed command-line options for ttx.

    Class attributes hold the defaults; ``__init__`` overrides them from the
    getopt-style ``rawOptions`` list and validates mutually exclusive flags.
    """

    listTables = False
    outputDir = None
    outputFile = None
    overWrite = False
    verbose = False
    quiet = False
    splitTables = False
    splitGlyphs = False
    disassembleInstructions = True
    mergeFile = None
    recalcBBoxes = True
    ignoreDecompileErrors = True
    bitmapGlyphDataFormat = "raw"
    unicodedata = None
    # NOTE(review): the usage text promises platform-specific line endings by
    # default, but this default forces LF — confirm which is intended.
    newlinestr = "\n"
    recalcTimestamp = None
    flavor = None
    useZopfli = False

    def __init__(self, rawOptions, numFiles):
        """Apply ``rawOptions`` (list of (option, value) pairs as produced by
        getopt) on top of the class-level defaults.

        Args:
            rawOptions: list of (option, value) tuples.
            numFiles: number of input files given; used to validate -m.

        Raises:
            getopt.GetoptError: on invalid values or conflicting options.
        """
        self.onlyTables = []
        self.skipTables = []
        self.fontNumber = -1
        for option, value in rawOptions:
            # general options
            if option == "-h":
                print(__doc__)
                sys.exit(0)
            elif option == "--version":
                from fontTools import version

                print(version)
                sys.exit(0)
            elif option == "-d":
                if not os.path.isdir(value):
                    raise getopt.GetoptError(
                        "The -d option value must be an existing directory"
                    )
                self.outputDir = value
            elif option == "-o":
                self.outputFile = value
            elif option == "-f":
                self.overWrite = True
            elif option == "-v":
                self.verbose = True
            elif option == "-q":
                self.quiet = True
            # dump options
            elif option == "-l":
                self.listTables = True
            elif option == "-t":
                # pad with space if table tag length is less than 4
                value = value.ljust(4)
                self.onlyTables.append(value)
            elif option == "-x":
                # pad with space if table tag length is less than 4
                value = value.ljust(4)
                self.skipTables.append(value)
            elif option == "-s":
                self.splitTables = True
            elif option == "-g":
                # -g implies (and forces) splitTables
                self.splitGlyphs = True
                self.splitTables = True
            elif option == "-i":
                self.disassembleInstructions = False
            elif option == "-z":
                validOptions = ("raw", "row", "bitwise", "extfile")
                if value not in validOptions:
                    # BUGFIX: the message previously interpolated `option`
                    # (always "-z") instead of the rejected `value`.
                    raise getopt.GetoptError(
                        "-z does not allow %s as a format. Use %s"
                        % (value, validOptions)
                    )
                self.bitmapGlyphDataFormat = value
            elif option == "-y":
                self.fontNumber = int(value)
            # compile options
            elif option == "-m":
                self.mergeFile = value
            elif option == "-b":
                self.recalcBBoxes = False
            elif option == "-e":
                self.ignoreDecompileErrors = False
            elif option == "--unicodedata":
                self.unicodedata = value
            elif option == "--newline":
                validOptions = ("LF", "CR", "CRLF")
                if value == "LF":
                    self.newlinestr = "\n"
                elif value == "CR":
                    self.newlinestr = "\r"
                elif value == "CRLF":
                    self.newlinestr = "\r\n"
                else:
                    raise getopt.GetoptError(
                        "Invalid choice for --newline: %r (choose from %s)"
                        % (value, ", ".join(map(repr, validOptions)))
                    )
            elif option == "--recalc-timestamp":
                self.recalcTimestamp = True
            elif option == "--no-recalc-timestamp":
                self.recalcTimestamp = False
            elif option == "--flavor":
                self.flavor = value
            elif option == "--with-zopfli":
                self.useZopfli = True
        # Cross-option validation.
        if self.verbose and self.quiet:
            raise getopt.GetoptError("-q and -v options are mutually exclusive")
        if self.verbose:
            self.logLevel = logging.DEBUG
        elif self.quiet:
            self.logLevel = logging.WARNING
        else:
            self.logLevel = logging.INFO
        if self.mergeFile and self.flavor:
            raise getopt.GetoptError("-m and --flavor options are mutually exclusive")
        if self.onlyTables and self.skipTables:
            raise getopt.GetoptError("-t and -x options are mutually exclusive")
        if self.mergeFile and numFiles > 1:
            raise getopt.GetoptError(
                "Must specify exactly one TTX source file when using -m"
            )
        if self.flavor != "woff" and self.useZopfli:
            raise getopt.GetoptError("--with-zopfli option requires --flavor 'woff'")
input == "-": + input, input_name = sys.stdin, sys.stdin.name + output_name = output + if output == "-": + output, output_name = sys.stdout.buffer, sys.stdout.name + log.info('Compiling "%s" to "%s"...' % (input_name, output)) + if options.useZopfli: + from fontTools.ttLib import sfnt + + sfnt.USE_ZOPFLI = True + ttf = TTFont( + options.mergeFile, + flavor=options.flavor, + recalcBBoxes=options.recalcBBoxes, + recalcTimestamp=options.recalcTimestamp, + ) + ttf.importXML(input) + + if options.recalcTimestamp is None and "head" in ttf and input is not sys.stdin: + # use TTX file modification time for head "modified" timestamp + mtime = os.path.getmtime(input) + ttf["head"].modified = timestampSinceEpoch(mtime) + + ttf.save(output) + + +def guessFileType(fileName): + if fileName == "-": + header = sys.stdin.buffer.peek(256) + ext = "" + else: + base, ext = os.path.splitext(fileName) + try: + with open(fileName, "rb") as f: + header = f.read(256) + except IOError: + return None + + if header.startswith(b"\xef\xbb\xbf<?xml"): + header = header.lstrip(b"\xef\xbb\xbf") + cr, tp = getMacCreatorAndType(fileName) + if tp in ("sfnt", "FFIL"): + return "TTF" + if ext == ".dfont": + return "TTF" + head = Tag(header[:4]) + if head == "OTTO": + return "OTF" + elif head == "ttcf": + return "TTC" + elif head in ("\0\1\0\0", "true"): + return "TTF" + elif head == "wOFF": + return "WOFF" + elif head == "wOF2": + return "WOFF2" + elif head == "<?xm": + # Use 'latin1' because that can't fail. 
+ header = tostr(header, "latin1") + if opentypeheaderRE.search(header): + return "OTX" + else: + return "TTX" + return None + + +def parseOptions(args): + rawOptions, files = getopt.gnu_getopt( + args, + "ld:o:fvqht:x:sgim:z:baey:", + [ + "unicodedata=", + "recalc-timestamp", + "no-recalc-timestamp", + "flavor=", + "version", + "with-zopfli", + "newline=", + ], + ) + + options = Options(rawOptions, len(files)) + jobs = [] + + if not files: + raise getopt.GetoptError("Must specify at least one input file") + + for input in files: + if input != "-" and not os.path.isfile(input): + raise getopt.GetoptError('File not found: "%s"' % input) + tp = guessFileType(input) + if tp in ("OTF", "TTF", "TTC", "WOFF", "WOFF2"): + extension = ".ttx" + if options.listTables: + action = ttList + else: + action = ttDump + elif tp == "TTX": + extension = "." + options.flavor if options.flavor else ".ttf" + action = ttCompile + elif tp == "OTX": + extension = "." + options.flavor if options.flavor else ".otf" + action = ttCompile + else: + raise getopt.GetoptError('Unknown file type: "%s"' % input) + + if options.outputFile: + output = options.outputFile + else: + if input == "-": + raise getopt.GetoptError("Must provide -o when reading from stdin") + output = makeOutputFileName( + input, options.outputDir, extension, options.overWrite + ) + # 'touch' output file to avoid race condition in choosing file names + if action != ttList: + open(output, "a").close() + jobs.append((action, input, output)) + return jobs, options + + +def process(jobs, options): + for action, input, output in jobs: + action(input, output, options) + + +def main(args=None): + """Convert OpenType fonts to XML and back""" + from fontTools import configLogger + + if args is None: + args = sys.argv[1:] + try: + jobs, options = parseOptions(args) + except getopt.GetoptError as e: + print("%s\nERROR: %s" % (__doc__, e), file=sys.stderr) + sys.exit(2) + + configLogger(level=options.logLevel) + + try: + process(jobs, 
options) + except KeyboardInterrupt: + log.error("(Cancelled.)") + sys.exit(1) + except SystemExit: + raise + except TTLibError as e: + log.error(e) + sys.exit(1) + except: + log.exception("Unhandled exception has occurred") + sys.exit(1) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1fca2de36be27dd42cd23053121f9849126ee502 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/__init__.py @@ -0,0 +1,4 @@ +from .cli import cli, deploy +from .commands import custom_component + +__all__ = ["cli", "deploy", "custom_component"] diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/__pycache__/cli.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/__pycache__/cli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..905bc8c7a619705f45e6e87ef882583aaa576413 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/__pycache__/cli.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea33d1ea67474e4324ac5ba273c6f20109534907 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/__pycache__/display.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/__pycache__/display.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d2c4f57e30742762432deee6c63328f664a12e1 Binary files /dev/null and 
b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/__pycache__/display.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/__pycache__/reload.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/__pycache__/reload.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73f30614a3eba1ce94c3c346190d22d78d033439 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/__pycache__/reload.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b675f4eec3d4e32f9ba73ed9abab35a43cfffc63 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/_docs_assets.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/_docs_assets.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf096ae3c667004408d7b77649dbc3562b5450b8 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/_docs_assets.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/app.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/app.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ab4b7a7ba4650b5c57b5bc148cbf0c114b63724 Binary files /dev/null and 
b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/app.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/create.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/create.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ff84a1828e72fd1cd6fb606538d69881ee14f4b Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/create.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/dev.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/dev.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..353a64103c0cac57b3d322da16a8055d52fdce2a Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/dev.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/install_component.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/install_component.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dbc195e05c00501e3c33e5ac0e9d3f2ae63e0d29 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/install_component.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/publish.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/publish.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b5c311a8995b8c4c8f9c2b794ba03e8400c0af8 Binary files /dev/null and 
b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/publish.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/show.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/show.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a803e5e4de141bce56588c4e209ecfb4e2af61f9 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/__pycache__/show.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/_create_utils.py b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/_create_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0cb13fe796b7650f8833a51540b251052c73a631 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/_create_utils.py @@ -0,0 +1,426 @@ +from __future__ import annotations + +import dataclasses +import inspect +import json +import re +import shutil +import textwrap +from pathlib import Path +from typing import Literal + +import gradio + + +def _in_test_dir(): + """Check if the current working directory ends with gradio/js/preview/test.""" + return Path.cwd().parts[-4:] == ("gradio", "js", "preview", "test") + + +default_demo_code = """ +example = {name}().example_value() + +demo = gr.Interface( + lambda x:x, + {name}(), # interactive version of your component + {name}(), # static version of your component + # examples=[[example]], # uncomment this line to view the "example version" of your component +) +""" + +static_only_demo_code = """ +example = {name}().example_value() + +with gr.Blocks() as demo: + with gr.Row(): + {name}(label="Blank"), # blank component + {name}(value=example, label="Populated"), # populated component +""" + +layout_demo_code = """ +with gr.Blocks() as demo: 
+ with {name}(): + gr.Textbox(value="foo", interactive=True) + gr.Number(value=10, interactive=True) +""" + +fallback_code = """ +with gr.Blocks() as demo: + gr.Markdown("# Change the value (keep it JSON) and the front-end will update automatically.") + {name}(value={{"message": "Hello from Gradio!"}}, label="Static") +""" + + +PATTERN_RE = r"gradio-template-\w+" +PATTERN = "gradio-template-{template}" + + +@dataclasses.dataclass +class ComponentFiles: + template: str + demo_code: str = default_demo_code + python_file_name: str = "" + js_dir: str = "" + + def __post_init__(self): + self.js_dir = self.js_dir or self.template.lower() + self.python_file_name = self.python_file_name or f"{self.template.lower()}.py" + + +OVERRIDES = { + "AnnotatedImage": ComponentFiles( + template="AnnotatedImage", + python_file_name="annotated_image.py", + demo_code=static_only_demo_code, + ), + "HighlightedText": ComponentFiles( + template="HighlightedText", + python_file_name="highlighted_text.py", + demo_code=static_only_demo_code, + ), + "Chatbot": ComponentFiles(template="Chatbot", demo_code=static_only_demo_code), + "Gallery": ComponentFiles(template="Gallery", demo_code=static_only_demo_code), + "HTML": ComponentFiles(template="HTML", demo_code=static_only_demo_code), + "Label": ComponentFiles(template="Label", demo_code=static_only_demo_code), + "Markdown": ComponentFiles(template="Markdown", demo_code=static_only_demo_code), + "Fallback": ComponentFiles(template="Fallback", demo_code=fallback_code), + "Plot": ComponentFiles(template="Plot", demo_code=static_only_demo_code), + "BarPlot": ComponentFiles( + template="BarPlot", + python_file_name="native_plot.py", + js_dir="plot", + demo_code=static_only_demo_code, + ), + "ClearButton": ComponentFiles( + template="ClearButton", + python_file_name="clear_button.py", + js_dir="button", + demo_code=static_only_demo_code, + ), + "ColorPicker": ComponentFiles( + template="ColorPicker", python_file_name="color_picker.py" + ), + 
"DuplicateButton": ComponentFiles( + template="DuplicateButton", + python_file_name="duplicate_button.py", + js_dir="button", + demo_code=static_only_demo_code, + ), + "FileExplorer": ComponentFiles( + template="FileExplorer", + python_file_name="file_explorer.py", + js_dir="fileexplorer", + demo_code=textwrap.dedent( + """ + import os + + with gr.Blocks() as demo: + {name}(value=os.path.dirname(__file__).split(os.sep)) + """ + ), + ), + "LinePlot": ComponentFiles( + template="LinePlot", + python_file_name="native_plot.py", + js_dir="plot", + demo_code=static_only_demo_code, + ), + "LogoutButton": ComponentFiles( + template="LogoutButton", + python_file_name="logout_button.py", + js_dir="button", + demo_code=static_only_demo_code, + ), + "LoginButton": ComponentFiles( + template="LoginButton", + python_file_name="login_button.py", + js_dir="button", + demo_code=static_only_demo_code, + ), + "ScatterPlot": ComponentFiles( + template="ScatterPlot", + python_file_name="native_plot.py", + js_dir="plot", + demo_code=static_only_demo_code, + ), + "UploadButton": ComponentFiles( + template="UploadButton", + python_file_name="upload_button.py", + demo_code=static_only_demo_code, + ), + "JSON": ComponentFiles( + template="JSON", + python_file_name="json_component.py", + demo_code=static_only_demo_code, + ), + "Row": ComponentFiles( + template="Row", + demo_code=layout_demo_code, + ), + "Column": ComponentFiles( + template="Column", + demo_code=layout_demo_code, + ), + "Tabs": ComponentFiles( + template="Tabs", + demo_code=textwrap.dedent( + """ + with gr.Blocks() as demo: + with {name}(): + with gr.Tab("Tab 1"): + gr.Textbox(value="foo", interactive=True) + with gr.Tab("Tab 2"): + gr.Number(value=10, interactive=True) + """ + ), + ), + "Group": ComponentFiles( + template="Group", + demo_code=layout_demo_code, + ), + "Accordion": ComponentFiles( + template="Accordion", + demo_code=textwrap.dedent( + """ + with gr.Blocks() as demo: + with {name}(label="Accordion"): + 
gr.Textbox(value="foo", interactive=True) + gr.Number(value=10, interactive=True) + """ + ), + ), + "Model3D": ComponentFiles( + template="Model3D", + js_dir="model3D", + demo_code=textwrap.dedent( + """ + with gr.Blocks() as demo: + {name}() + """ + ), + ), + "ImageEditor": ComponentFiles( + template="ImageEditor", + python_file_name="image_editor.py", + js_dir="imageeditor", + ), + "MultimodalTextbox": ComponentFiles( + template="MultimodalTextbox", + python_file_name="multimodal_textbox.py", + js_dir="multimodaltextbox", + ), + "DownloadButton": ComponentFiles( + template="DownloadButton", + python_file_name="download_button.py", + js_dir="downloadbutton", + ), +} + + +def _get_component_code(template: str | None) -> ComponentFiles: + template = template or "Fallback" + if template in OVERRIDES: + return OVERRIDES[template] + else: + return ComponentFiles( + python_file_name=f"{template.lower()}.py", + js_dir=template.lower(), + template=template, + ) + + +def _get_js_dependency_version(name: str, local_js_dir: Path) -> str: + package_json = json.loads( + Path(local_js_dir / name.split("/")[1] / "package.json").read_text() + ) + return package_json["version"] + + +def _modify_js_deps( + package_json: dict, + key: Literal["dependencies", "devDependencies"], + gradio_dir: Path, +): + for dep in package_json.get(key, []): + # if curent working directory is the gradio repo, use the local version of the dependency' + if not _in_test_dir() and dep.startswith("@gradio/"): + package_json[key][dep] = _get_js_dependency_version( + dep, gradio_dir / "_frontend_code" + ) + return package_json + + +def delete_contents(directory: str | Path) -> None: + """Delete all contents of a directory, but not the directory itself.""" + path = Path(directory) + for child in path.glob("*"): + if child.is_file(): + child.unlink() + elif child.is_dir(): + shutil.rmtree(child) + + +def _create_frontend( + name: str, # noqa: ARG001 + component: ComponentFiles, + directory: Path, + 
package_name: str, +): + frontend = directory / "frontend" + frontend.mkdir(exist_ok=True) + + p = Path(inspect.getfile(gradio)).parent + + def ignore(_src, names): + ignored = [] + for n in names: + if ( + n.startswith("CHANGELOG") + or n.startswith("README.md") + or ".test." in n + or ".stories." in n + or ".spec." in n + ): + ignored.append(n) + return ignored + + shutil.copytree( + str(p / "_frontend_code" / component.js_dir), + frontend, + dirs_exist_ok=True, + ignore=ignore, + ) + source_package_json = json.loads(Path(frontend / "package.json").read_text()) + source_package_json["name"] = package_name + source_package_json = _modify_js_deps(source_package_json, "dependencies", p) + source_package_json = _modify_js_deps(source_package_json, "devDependencies", p) + (frontend / "package.json").write_text(json.dumps(source_package_json, indent=2)) + shutil.copy( + str(Path(__file__).parent / "files" / "gradio.config.js"), + frontend / "gradio.config.js", + ) + + +def _replace_old_class_name(old_class_name: str, new_class_name: str, content: str): + pattern = rf"(?<=\b)(?<!\bimport\s)(?<!\.){re.escape(old_class_name)}(?=\b)" + return re.sub(pattern, new_class_name, content) + + +def _strip_document_lines(content: str): + return "\n".join( + [line for line in content.split("\n") if not line.startswith("@document(")] + ) + + +def _create_backend( + name: str, component: ComponentFiles, directory: Path, package_name: str +): + def find_template_in_list(template, list_to_search): + for item in list_to_search: + if template.lower() == item.lower(): + return item + return None + + lists_to_search = [ + (gradio.components.__all__, "components"), + (gradio.layouts.__all__, "layouts"), + (gradio._simple_templates.__all__, "_simple_templates"), # type: ignore + ] + + correct_cased_template = None + module = None + + for list_, module_name in lists_to_search: + correct_cased_template = find_template_in_list(component.template, list_) + if correct_cased_template: + module = 
module_name + break + + if not correct_cased_template: + raise ValueError( + f"Cannot find {component.template} in gradio.components, gradio.layouts, or gradio._simple_templates. " + "Please pass in a valid component name via the --template option. It must match the name of the python class." + ) + + if not module: + raise ValueError("Module not found") + + # These README contents are used to install the component but they are overwritten later + readme_contents = textwrap.dedent( + """ +# {package_name} +A Custom Gradio component. + +## Example usage + +```python +import gradio as gr +from {package_name} import {name} +``` +""" + ).format(package_name=package_name, name=name) + (directory / "README.md").write_text(readme_contents) + + backend = directory / "backend" / package_name + backend.mkdir(exist_ok=True, parents=True) + + gitignore = Path(__file__).parent / "files" / "gitignore" + gitignore_contents = gitignore.read_text() + gitignore_dest = directory / ".gitignore" + gitignore_dest.write_text(gitignore_contents) + + pyproject = Path(__file__).parent / "files" / "pyproject_.toml" + pyproject_contents = pyproject.read_text() + pyproject_dest = directory / "pyproject.toml" + pyproject_contents = pyproject_contents.replace("<<name>>", package_name).replace( + "<<template>>", PATTERN.format(template=correct_cased_template) + ) + pyproject_dest.write_text(pyproject_contents) + + demo_dir = directory / "demo" + demo_dir.mkdir(exist_ok=True, parents=True) + + (demo_dir / "app.py").write_text( + f""" +import gradio as gr +from {package_name} import {name} + +{component.demo_code.format(name=name)} + +if __name__ == "__main__": + demo.launch() +""" + ) + (demo_dir / "__init__.py").touch() + + init = backend / "__init__.py" + init.write_text( + f""" +from .{name.lower()} import {name} + +__all__ = ['{name}'] +""" + ) + + p = Path(inspect.getfile(gradio)).parent + python_file = backend / f"{name.lower()}.py" + + shutil.copy( + str(p / module / 
component.python_file_name), + str(python_file), + ) + + source_pyi_file = p / module / component.python_file_name.replace(".py", ".pyi") + pyi_file = backend / f"{name.lower()}.pyi" + if source_pyi_file.exists(): + shutil.copy(str(source_pyi_file), str(pyi_file)) + + content = python_file.read_text() + content = _replace_old_class_name(correct_cased_template, name, content) + content = _strip_document_lines(content) + python_file.write_text(content) + if pyi_file.exists(): + pyi_content = pyi_file.read_text() + pyi_content = _replace_old_class_name(correct_cased_template, name, content) + pyi_content = _strip_document_lines(pyi_content) + pyi_file.write_text(pyi_content) diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/app.py b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/app.py new file mode 100644 index 0000000000000000000000000000000000000000..d42252a024afc6b4753623516ab95ab2d837e688 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/app.py @@ -0,0 +1,24 @@ +from typer import Typer + +from .build import _build +from .create import _create +from .dev import _dev +from .docs import _docs +from .install_component import _install +from .publish import _publish +from .show import _show + +app = Typer(help="Create and publish a new Gradio component") + +app.command("create", help="Create a new component.")(_create) +app.command( + "build", + help="Build the component for distribution. 
Must be called from the component directory.", +)(_build) +app.command("dev", help="Launch the custom component demo in development mode.")(_dev) +app.command("show", help="Show the list of available templates")(_show) +app.command("install", help="Install the custom component in the current environment")( + _install +) +app.command("publish", help="Publish a component to PyPI and HuggingFace Hub")(_publish) +app.command("docs", help="Generate documentation for a custom components")(_docs) diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/build.py b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/build.py new file mode 100644 index 0000000000000000000000000000000000000000..0cefe668245c6e9004bb8cf8fe63a2a550f69362 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/build.py @@ -0,0 +1,179 @@ +from __future__ import annotations + +import importlib +import shutil +import subprocess +from pathlib import Path +from typing import Annotated, Optional + +import semantic_version +import typer +from tomlkit import dump, parse + +import gradio +from gradio.analytics import custom_component_analytics +from gradio.cli.commands.components._docs_utils import ( + get_deep, +) +from gradio.cli.commands.components.docs import run_command +from gradio.cli.commands.components.install_component import _get_executable_path +from gradio.cli.commands.display import LivePanelDisplay + +gradio_template_path = Path(gradio.__file__).parent / "templates" / "frontend" + + +def _build( + path: Annotated[ + Path, typer.Argument(help="The directory of the custom component.") + ] = Path("."), + build_frontend: Annotated[ + bool, typer.Option(help="Whether to build the frontend as well.") + ] = True, + bump_version: Annotated[ + bool, typer.Option(help="Whether to bump the version number automatically.") + ] = False, + generate_docs: Annotated[ + bool, typer.Option(help="Whether to generate the 
documentation as well.") + ] = True, + python_path: Annotated[ + Optional[str], + typer.Option( + help="Path to python executable. If None, will use the default path found by `which python3`. If python3 is not found, `which python` will be tried. If both fail an error will be raised." + ), + ] = None, +): + custom_component_analytics( + "build", + None, + None, + None, + None, + generate_docs=generate_docs, + bump_version=bump_version, + ) + name = Path(path).resolve() + if not (name / "pyproject.toml").exists(): + raise ValueError(f"Cannot find pyproject.toml file in {name}") + + with LivePanelDisplay() as live: + live.update( + f":package: Building package in [orange3]{str(name.name)}[/]", add_sleep=0.2 + ) + pyproject_toml = parse((path / "pyproject.toml").read_text()) + package_name = get_deep(pyproject_toml, ["project", "name"]) + + python_path = _get_executable_path( + "python", python_path, "--python-path", check_3=True + ) + + if not isinstance(package_name, str): + raise ValueError( + "Your pyproject.toml file does not have a [project] name field!" + ) + try: + importlib.import_module(package_name) # type: ignore + except ModuleNotFoundError as e: + raise ValueError( + f"Your custom component package ({package_name}) is not installed! " + "Please install it with the gradio cc install command before building it." + ) from e + if bump_version: + pyproject_toml = parse((path / "pyproject.toml").read_text()) + version = semantic_version.Version( + pyproject_toml["project"]["version"] # type: ignore + ).next_patch() + live.update( + f":1234: Using version [bold][magenta]{version}[/][/]. " + "Set [bold][magenta]--no-bump-version[/][/] to use the version in pyproject.toml file." 
+ ) + pyproject_toml["project"]["version"] = str(version) # type: ignore + with open(path / "pyproject.toml", "w", encoding="utf-8") as f: + dump(pyproject_toml, f) + else: + version = pyproject_toml["project"]["version"] # type: ignore + live.update( + f":1234: Package will use version [bold][magenta]{version}[/][/] defined in pyproject.toml file. " + "Set [bold][magenta]--bump-version[/][/] to automatically bump the version number." + ) + + if generate_docs: + _demo_dir = Path("demo").resolve() + _demo_name = "app.py" + _demo_path = _demo_dir / _demo_name + _readme_path = name / "README.md" + + run_command( + live=live, + name=package_name, + suppress_demo_check=False, + pyproject_toml=pyproject_toml, + generate_space=True, + generate_readme=True, + type_mode="simple", + _demo_path=_demo_path, + _demo_dir=_demo_dir, + _readme_path=_readme_path, + space_url=None, + _component_dir=name, + simple=True, + ) + + if build_frontend: + live.update(":art: Building frontend") + component_directory = path.resolve() + + node = shutil.which("node") + if not node: + raise ValueError( + "node must be installed in order to run build command." + ) + + gradio_node_path = subprocess.run( + [node, "-e", "console.log(require.resolve('@gradio/preview'))"], + cwd=Path(component_directory / "frontend"), + check=False, + capture_output=True, + ) + + if gradio_node_path.returncode != 0: + raise ValueError( + "Could not find `@gradio/preview`. Run `npm i -D @gradio/preview` in your frontend folder." 
+ ) + + gradio_node_path = gradio_node_path.stdout.decode("utf-8").strip() + + node_cmds = [ + node, + gradio_node_path, + "--component-directory", + component_directory, + "--root", + gradio_template_path, + "--mode", + "build", + "--python-path", + python_path, + ] + pipe = subprocess.run( + node_cmds, capture_output=True, text=True, check=False + ) + if pipe.returncode != 0: + live.update(":red_square: Build failed!") + live.update(pipe.stderr) + live.update(pipe.stdout) + raise SystemExit("Frontend build failed") + else: + live.update(":white_check_mark: Build succeeded!") + + cmds = [python_path, "-m", "build", str(name)] + live.update(f":construction_worker: Building... [grey37]({' '.join(cmds)})[/]") + pipe = subprocess.run(cmds, capture_output=True, text=True, check=False) + if pipe.returncode != 0: + live.update(":red_square: Build failed!") + live.update(pipe.stderr) + raise SystemExit("Python build failed") + else: + live.update(":white_check_mark: Build succeeded!") + live.update( + f":ferris_wheel: Wheel located in [orange3]{str(name / 'dist')}[/]" + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/create.py b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/create.py new file mode 100644 index 0000000000000000000000000000000000000000..410c1ddde89a1c78cc8b7bc74fb569801c67effa --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/create.py @@ -0,0 +1,196 @@ +from __future__ import annotations + +import shutil +from pathlib import Path +from typing import Annotated, Optional + +import typer +from rich import print +from rich.panel import Panel +from rich.prompt import Confirm, Prompt +from tomlkit import dump, parse + +from gradio.analytics import custom_component_analytics + +from ..display import LivePanelDisplay +from . 
import _create_utils +from .install_component import _get_npm, _install_command + + +def _create( + name: Annotated[ + str, + typer.Argument( + help="Name of the component. Preferably in camel case, i.e. MyTextBox." + ), + ], + directory: Annotated[ + Optional[Path], + typer.Option( + help="Directory to create the component in. Default is None. If None, will be created in <component-name> directory in the current directory." + ), + ] = None, + package_name: Annotated[ + Optional[str], + typer.Option(help="Name of the package. Default is gradio_{name.lower()}"), + ] = None, + template: Annotated[ + str, + typer.Option( + help="Component to use as a template. Should use exact name of python class." + ), + ] = "", + install: Annotated[ + bool, + typer.Option( + help="Whether to install the component in your current environment as a development install. Recommended for development." + ), + ] = True, + npm_install: Annotated[ + str, + typer.Option(help="NPM install command to use. Default is 'npm install'."), + ] = "npm install", + pip_path: Annotated[ + Optional[str], + typer.Option( + help="Path to pip executable. If None, will use the default path found by `which pip3`. If pip3 is not found, `which pip` will be tried. If both fail an error will be raised." + ), + ] = None, + overwrite: Annotated[ + bool, + typer.Option(help="Whether to overwrite the existing component if it exists."), + ] = False, + configure_metadata: Annotated[ + bool, + typer.Option( + help="Whether to interactively configure project metadata based on user input" + ), + ] = True, +): + custom_component_analytics( + "create", + template, + None, + None, + None, + npm_install=npm_install, + ) + if not directory: + directory = Path(name.lower()) + if not package_name: + package_name = f"gradio_{name.lower()}" + + if directory.exists() and not overwrite: + raise ValueError( + f"The directory {directory.resolve()} already exists. 
" + "Please set --overwrite flag or pass in the name " + "of a directory that does not already exist via the --directory option." + ) + elif directory.exists() and overwrite: + _create_utils.delete_contents(directory) + + directory.mkdir(exist_ok=overwrite) + + if _create_utils._in_test_dir(): + npm_install = f"{shutil.which('pnpm')} i --ignore-scripts" + else: + npm_install = _get_npm(npm_install) + + with LivePanelDisplay() as live: + live.update( + f":building_construction: Creating component [orange3]{name}[/] in directory [orange3]{directory}[/]", + add_sleep=0.2, + ) + if template: + live.update(f":fax: Starting from template [orange3]{template}[/]") + else: + live.update(":page_facing_up: Creating a new component from scratch.") + + component = _create_utils._get_component_code(template) + + _create_utils._create_backend(name, component, directory, package_name) + live.update(":snake: Created backend code", add_sleep=0.2) + + _create_utils._create_frontend( + name.lower(), component, directory=directory, package_name=package_name + ) + live.update(":art: Created frontend code", add_sleep=0.2) + + if install: + _install_command(directory, live, npm_install, pip_path) + + live._panel.stop() + + description = "A gradio custom component" + keywords = [] + + if configure_metadata: + print( + Panel( + "It is recommended to answer the following [bold][magenta]4 questions[/][/] to finish configuring your custom component's metadata." + "\nYou can also answer them later by editing the [bold][magenta]pyproject.toml[/][/] file in your component directory." 
+ ) + ) + + answer_qs = Confirm.ask("\nDo you want to answer them now?") + + pyproject_toml = parse((directory / "pyproject.toml").read_text()) + + if answer_qs: + name = pyproject_toml["project"]["name"] # type: ignore + + description = Prompt.ask( + "\n:pencil: Please enter a one sentence [bold][magenta]description[/][/] for your component" + ) + if description: + pyproject_toml["project"]["description"] = description # type: ignore + + license_ = ( + Prompt.ask( + "\n:bookmark_tabs: Please enter a [bold][magenta]software license[/][/] for your component. Leave blank for 'apache-2.0'" + ) + or "apache-2.0" + ) + print(f":bookmark_tabs: Using license [bold][magenta]{license_}[/][/]") + pyproject_toml["project"]["license"] = license_ # type: ignore + + requires_python = Prompt.ask( + "\n:snake: Please enter the [bold][magenta]allowed python[/][/] versions for your component. Leave blank for '>=3.10'" + ) + requires_python = requires_python or ">=3.10" + print( + f":snake: Using requires-python of [bold][magenta]{requires_python}[/][/]" + ) + pyproject_toml["project"]["requires-python"] = ( # type: ignore + requires_python or ">=3.10" + ) + + print( + "\n:label: Please add some keywords to help others discover your component." 
+ ) + while True: + keyword = Prompt.ask(":label: Leave blank to stop adding keywords") + if keyword: + keywords.append(keyword) + else: + break + current_keywords = pyproject_toml["project"].get("keywords", []) # type: ignore + pyproject_toml["project"]["keywords"] = current_keywords + keywords # type: ignore + with open(directory / "pyproject.toml", "w", encoding="utf-8") as f: + dump(pyproject_toml, f) + + (directory / "demo" / "requirements.txt").write_text(package_name) + readme_path = Path(__file__).parent / "files" / "README.md" + + readme_contents = readme_path.read_text() + tags = f", {', '.join(keywords)}" if keywords else "" + template = f", {template}" + readme_contents = ( + readme_contents.replace("<<title>>", package_name) + .replace("<<short-description>>", description) + .replace("<<tags>>", tags) + .replace("<<template>>", template) + ) + (directory / "README.md").write_text(readme_contents) + + print("\nComponent creation [bold][magenta]complete[/][/]!") diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/docs.py b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/docs.py new file mode 100644 index 0000000000000000000000000000000000000000..145d9073abc3767e755ba2ed3f8bb7277318f8eb --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/docs.py @@ -0,0 +1,204 @@ +from __future__ import annotations + +import importlib +import re +from pathlib import Path +from typing import Annotated, Any, Optional + +import requests +import tomlkit as toml +from typer import Argument, Option + +from gradio.analytics import custom_component_analytics +from gradio.cli.commands.display import LivePanelDisplay + +from ._docs_assets import css +from ._docs_utils import extract_docstrings, get_deep, make_markdown, make_space + + +def _docs( + path: Annotated[ + Path, Argument(help="The directory of the custom component.") + ] = Path("."), + demo_dir: Annotated[ + 
Optional[Path], Option(help="Path to the demo directory.") + ] = None, + demo_name: Annotated[Optional[str], Option(help="Name of the demo file.")] = None, + readme_path: Annotated[ + Optional[Path], Option(help="Path to the README.md file.") + ] = None, + space_url: Annotated[ + Optional[str], Option(help="URL of the Space to use for the demo.") + ] = None, + generate_space: Annotated[ + bool, + Option( + help="Create a documentation space for the custom compone.", is_flag=True + ), + ] = True, + generate_readme: Annotated[ + bool, + Option(help="Create a README.md file for the custom component.", is_flag=True), + ] = True, + suppress_demo_check: Annotated[ + bool, + Option( + help="Suppress demo warnings and errors.", + is_flag=True, + ), + ] = False, +): + """Runs the documentation generator.""" + custom_component_analytics( + "docs", + None, + None, + None, + None, + ) + + _component_dir = Path(path).resolve() + _demo_dir = Path(demo_dir).resolve() if demo_dir else Path("demo").resolve() + _demo_name = demo_name if demo_name else "app.py" + _demo_path = _demo_dir / _demo_name + _readme_path = ( + Path(readme_path).resolve() if readme_path else _component_dir / "README.md" + ) + + if not generate_space and not generate_readme: + raise ValueError("Must generate at least one of space or readme") + + with LivePanelDisplay() as live: + live.update( + f":page_facing_up: Generating documentation for [orange3]{str(_component_dir.name)}[/]", + add_sleep=0.2, + ) + live.update( + f":eyes: Reading project metadata from [orange3]{_component_dir}/pyproject.toml[/]\n" + ) + + if not (_component_dir / "pyproject.toml").exists(): + raise ValueError( + f"Cannot find pyproject.toml file in [orange3]{_component_dir}[/]" + ) + + with open(_component_dir / "pyproject.toml", encoding="utf-8") as f: + data = toml.loads(f.read()) + + name = get_deep(data, ["project", "name"]) + + if not isinstance(name, str): + raise ValueError("Name not found in pyproject.toml") + + run_command( + 
live=live, + name=name, + suppress_demo_check=suppress_demo_check, + pyproject_toml=data, + generate_space=generate_space, + generate_readme=generate_readme, + type_mode="simple", + _demo_path=_demo_path, + _demo_dir=_demo_dir, + _readme_path=_readme_path, + space_url=space_url, + _component_dir=_component_dir, + ) + + +def run_command( + live: LivePanelDisplay, + name: str, + pyproject_toml: dict[str, Any], + suppress_demo_check: bool, + generate_space: bool, + generate_readme: bool, + type_mode: str, + _demo_path: Path, + _demo_dir: Path, + _readme_path: Path, + space_url: str | None, + _component_dir: Path, + simple: bool = False, +): + with open(_demo_path, encoding="utf-8") as f: + demo = f.read() + + pypi_exists = requests.get(f"https://pypi.org/pypi/{name}/json").status_code + + pypi_exists = pypi_exists == 200 or False + + local_version = get_deep(pyproject_toml, ["project", "version"]) + description = str(get_deep(pyproject_toml, ["project", "description"]) or "") + repo = get_deep(pyproject_toml, ["project", "urls", "repository"]) + space = ( + space_url + if space_url + else get_deep(pyproject_toml, ["project", "urls", "space"]) + ) + + if not local_version and not pypi_exists: + raise ValueError( + f"Cannot find version in pyproject.toml or on PyPI for [orange3]{name}[/].\nIf you have just published to PyPI, please wait a few minutes and try again." 
+ ) + module = importlib.import_module(name) + (docs, type_mode) = extract_docstrings(module) + + if generate_space: + if not simple: + live.update(":computer: [blue]Generating space.[/]") + + source = make_space( + docs=docs, + name=name, + description=description, + local_version=local_version + if local_version is None + else str(local_version), + demo=demo, + space=space if space is None else str(space), + repo=repo if repo is None else str(repo), + pypi_exists=pypi_exists, + suppress_demo_check=suppress_demo_check, + ) + + with open(_demo_dir / "space.py", "w", encoding="utf-8") as f: + f.write(source) + if not simple: + live.update( + f":white_check_mark: Space created in [orange3]{_demo_dir}/space.py[/]\n" + ) + with open(_demo_dir / "css.css", "w", encoding="utf-8") as f: + f.write(css) + + if generate_readme: + if not simple: + live.update(":pencil: [blue]Generating README.[/]") + readme = make_markdown( + docs, name, description, local_version, demo, space, repo, pypi_exists + ) + + readme_content = Path(_readme_path).read_text() + + with open(_readme_path, "w", encoding="utf-8") as f: + yaml_regex = re.search( + "(?:^|[\r\n])---[\n\r]+([\\S\\s]*?)[\n\r]+---([\n\r]|$)", readme_content + ) + if yaml_regex is not None: + readme = readme_content[: yaml_regex.span()[-1]] + readme + f.write(readme) + if not simple: + live.update( + f":white_check_mark: README generated in [orange3]{_readme_path}[/]" + ) + if simple: + short_readme_path = Path(_readme_path).relative_to(_component_dir) + short_demo_path = Path(_demo_dir / "space.py").relative_to(_component_dir) + live.update( + f":white_check_mark: Documentation generated in [orange3]{short_demo_path}[/] and [orange3]{short_readme_path}[/]. Pass --no-generate-docs to disable auto documentation." + ) + + if type_mode == "simple": + live.update( + "\n:orange_circle: [red]The docs were generated in simple mode. 
Updating python to a more recent version will result in richer documentation.[/]" + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/files/README.md b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/files/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1c8256ec0fdf5c05bebcb6c744391b74cb526f06 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/files/README.md @@ -0,0 +1,15 @@ +--- +tags: [gradio-custom-component<<template>><<tags>>] +title: <<title>> +short_description: <<short-description>> +colorFrom: blue +colorTo: yellow +sdk: gradio +pinned: false +app_file: space.py +--- + +# <<title>> + +You can auto-generate documentation for your custom component with the `gradio cc docs` command. +You can also edit this file however you like. diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/files/gitignore b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/files/gitignore new file mode 100644 index 0000000000000000000000000000000000000000..5dd75ecdbb6f74962bbede3376d2f1182679ec19 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/files/gitignore @@ -0,0 +1,12 @@ +.eggs/ +dist/ +*.pyc +__pycache__/ +*.py[cod] +*$py.class +__tmp/* +*.pyi +.mypycache +.ruff_cache +node_modules +backend/**/templates/ \ No newline at end of file diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/install_component.py b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/install_component.py new file mode 100644 index 0000000000000000000000000000000000000000..6dd0a9057366d486243e81ffe90ab0cbf96bd15c --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/gradio/cli/commands/components/install_component.py @@ -0,0 +1,110 @@ +from __future__ import annotations + +import shutil +import 
subprocess +from pathlib import Path +from typing import Annotated, Optional + +from rich.markup import escape +from typer import Argument, Option + +from gradio.cli.commands.display import LivePanelDisplay +from gradio.utils import set_directory + + +def _get_npm(npm_install: str): + npm_install = npm_install.strip() + if npm_install == "npm install": + npm = shutil.which("npm") + if not npm: + raise ValueError( + "By default, the install command uses npm to install " + "the frontend dependencies. Please install npm or pass your own install command " + "via the --npm-install option." + ) + npm_install = f"{npm} install" + return npm_install + + +def _get_executable_path( + executable: str, + executable_path: str | None, + cli_arg_name: str, + check_3: bool = False, +) -> str: + """Get the path to an executable, either from the provided path or from the PATH environment variable. + + The value of executable_path takes precedence in case the value in PATH is incorrect. + This should give more control to the developer in case their envrinment is not set up correctly. + + If check_3 is True, we append 3 to the executable name to give python3 priority over python (same for pip). + """ + if executable_path: + if not Path(executable_path).exists() or not Path(executable_path).is_file(): + raise ValueError( + f"The provided {executable} path ({executable_path}) does not exist or is not a file." + ) + return executable_path + path = shutil.which(executable) + if check_3: + path = shutil.which(f"{executable}3") or path + if not path: + raise ValueError( + f"Could not find {executable}. Please ensure it is installed and in your PATH or pass the {cli_arg_name} parameter." 
+ ) + return path + + +def _install_command( + directory: Path, live: LivePanelDisplay, npm_install: str, pip_path: str | None +): + pip_executable_path = _get_executable_path( + "pip", executable_path=pip_path, cli_arg_name="--pip-path", check_3=True + ) + cmds = [pip_executable_path, "install", "-e", f"{str(directory)}[dev]"] + live.update( + f":construction_worker: Installing python... [grey37]({escape(' '.join(cmds))})[/]" + ) + pipe = subprocess.run(cmds, capture_output=True, text=True, check=False) + + if pipe.returncode != 0: + live.update(":red_square: Python installation [bold][red]failed[/][/]") + live.update(pipe.stderr) + raise SystemExit("Python installation failed") + + else: + live.update(":white_check_mark: Python install succeeded!") + + live.update( + f":construction_worker: Installing javascript... [grey37]({npm_install})[/]" + ) + with set_directory(directory / "frontend"): + pipe = subprocess.run( + npm_install.split(), capture_output=True, text=True, check=False + ) + if pipe.returncode != 0: + live.update(":red_square: NPM install [bold][red]failed[/][/]") + live.update(pipe.stdout) + live.update(pipe.stderr) + raise SystemExit("NPM install failed") + else: + live.update(":white_check_mark: NPM install succeeded!") + + +def _install( + directory: Annotated[ + Path, Argument(help="The directory containing the custom components.") + ] = Path("."), + npm_install: Annotated[ + str, Option(help="NPM install command to use. Default is 'npm install'.") + ] = "npm install", + pip_path: Annotated[ + Optional[str], + Option( + help="Path to pip executable. If None, will use the default path found by `which pip3`. If pip3 is not found, `which pip` will be tried. If both fail an error will be raised." 
+ ), + ] = None, +): + npm_install = _get_npm(npm_install) + with LivePanelDisplay() as live: + _install_command(directory, live, npm_install, pip_path) diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/components/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/gradio/components/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca47b2be229fc602bdf11466e56a66b821ef355a Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/gradio/components/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/components/__pycache__/dropdown.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/gradio/components/__pycache__/dropdown.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7dbd9adf3cb5d9aeb26d629267584fb10b6af6f Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/gradio/components/__pycache__/dropdown.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/components/__pycache__/file_explorer.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/gradio/components/__pycache__/file_explorer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b32473e22e62a63f5d1e7fc27552c1eec108d90b Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/gradio/components/__pycache__/file_explorer.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/components/__pycache__/gallery.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/gradio/components/__pycache__/gallery.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43aef5d366c1b9bb9c2423c5c7796355e50ba7d9 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/gradio/components/__pycache__/gallery.cpython-310.pyc differ diff --git 
a/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Index.BQnekPhB.js.gz b/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Index.BQnekPhB.js.gz new file mode 100644 index 0000000000000000000000000000000000000000..89603afc4d088c8656658ea783232e95f04204db --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Index.BQnekPhB.js.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c152787c9f2bbfd25263823263ab4b31cedfcd2258c2d73882e0f3fa1adbd17b +size 3946 diff --git a/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Music.BKn1BNLT.js.gz b/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Music.BKn1BNLT.js.gz new file mode 100644 index 0000000000000000000000000000000000000000..23cb12c77dd6a2d17ef44bfd3cc5b0bc567db643 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/gradio/templates/node/build/client/_app/immutable/chunks/Music.BKn1BNLT.js.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b96d3a70209f170426d3447d7bc31b2ded67bd22d9ea4bcf1150e8f56660c63 +size 763