text stringlengths 0 1.05M | meta dict |
|---|---|
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from fontTools.misc.fixedTools import (
ensureVersionIsLong as fi2ve, versionToFixed as ve2fi)
from . import DefaultTable
import math
# sstruct format string for the fixed-size 'hhea' (Horizontal Header) table.
# Field names follow the OpenType spec, except that the spec's
# "ascender"/"descender" are stored here as ascent/descent (aliases are
# provided on the table class).
hheaFormat = """
> # big endian
tableVersion: L
ascent: h
descent: h
lineGap: h
advanceWidthMax: H
minLeftSideBearing: h
minRightSideBearing: h
xMaxExtent: h
caretSlopeRise: h
caretSlopeRun: h
caretOffset: h
reserved0: h
reserved1: h
reserved2: h
reserved3: h
metricDataFormat: h
numberOfHMetrics: H
"""
class table__h_h_e_a(DefaultTable.DefaultTable):
    """Horizontal Header table ('hhea').

    Holds font-wide horizontal metrics: ascent/descent, line gap, the
    extreme advance width / side bearings / extents, caret slope, and
    the number of entries in the 'hmtx' table.
    """

    # Note: Keep in sync with table__v_h_e_a
    dependencies = ['hmtx', 'glyf', 'CFF ', 'CFF2']

    # OpenType spec renamed these, add aliases for compatibility
    @property
    def ascender(self): return self.ascent

    @ascender.setter
    def ascender(self, value): self.ascent = value

    @property
    def descender(self): return self.descent

    @descender.setter
    def descender(self, value): self.descent = value

    def decompile(self, data, ttFont):
        # Fixed-size table: unpack every field directly onto self.
        sstruct.unpack(hheaFormat, data, self)

    def compile(self, ttFont):
        # Only recalc when outline data is actually loaded; otherwise the
        # values already stored on self are assumed to be current.
        if ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ') or ttFont.isLoaded('CFF2')):
            self.recalc(ttFont)
        self.tableVersion = fi2ve(self.tableVersion)
        return sstruct.pack(hheaFormat, self)

    def recalc(self, ttFont):
        """Recompute advanceWidthMax, the min side bearings and xMaxExtent
        from 'hmtx' plus the glyph outlines ('glyf', or CFF/CFF2 charstrings).
        """
        if 'hmtx' in ttFont:
            hmtxTable = ttFont['hmtx']
            self.advanceWidthMax = max(adv for adv, _ in hmtxTable.metrics.values())

        # glyph name -> outline width (xMax - xMin), only for glyphs with outlines
        boundsWidthDict = {}
        if 'glyf' in ttFont:
            glyfTable = ttFont['glyf']
            for name in ttFont.getGlyphOrder():
                g = glyfTable[name]
                if g.numberOfContours == 0:
                    continue
                if g.numberOfContours < 0 and not hasattr(g, "xMax"):
                    # Composite glyph without extents set.
                    # Calculate those.
                    g.recalcBounds(glyfTable)
                boundsWidthDict[name] = g.xMax - g.xMin
        elif 'CFF ' in ttFont or 'CFF2' in ttFont:
            if 'CFF ' in ttFont:
                topDict = ttFont['CFF '].cff.topDictIndex[0]
            else:
                topDict = ttFont['CFF2'].cff.topDictIndex[0]
            charStrings = topDict.CharStrings
            for name in ttFont.getGlyphOrder():
                cs = charStrings[name]
                bounds = cs.calcBounds(charStrings)
                if bounds is not None:
                    # Round outward so the integer width covers the bbox.
                    boundsWidthDict[name] = int(
                        math.ceil(bounds[2]) - math.floor(bounds[0]))

        if boundsWidthDict:
            minLeftSideBearing = float('inf')
            minRightSideBearing = float('inf')
            xMaxExtent = -float('inf')
            for name, boundsWidth in boundsWidthDict.items():
                advanceWidth, lsb = hmtxTable[name]
                rsb = advanceWidth - lsb - boundsWidth
                extent = lsb + boundsWidth
                minLeftSideBearing = min(minLeftSideBearing, lsb)
                minRightSideBearing = min(minRightSideBearing, rsb)
                xMaxExtent = max(xMaxExtent, extent)
            self.minLeftSideBearing = minLeftSideBearing
            self.minRightSideBearing = minRightSideBearing
            self.xMaxExtent = xMaxExtent
        else:  # No glyph has outlines.
            self.minLeftSideBearing = 0
            self.minRightSideBearing = 0
            self.xMaxExtent = 0

    def toXML(self, writer, ttFont):
        formatstring, names, fixes = sstruct.getformat(hheaFormat)
        for name in names:
            value = getattr(self, name)
            if name == "tableVersion":
                # Version is emitted as a hex fixed-point value.
                value = fi2ve(value)
                value = "0x%08x" % value
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "tableVersion":
            setattr(self, name, ve2fi(attrs["value"]))
            return
        setattr(self, name, safeEval(attrs["value"]))
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_h_h_e_a.py",
"copies": "5",
"size": "3655",
"license": "apache-2.0",
"hash": 7781337394419622000,
"line_mean": 28.7154471545,
"line_max": 109,
"alpha_frac": 0.6714090287,
"autogenerated": false,
"ratio": 2.976384364820847,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02712166170414297,
"num_lines": 123
} |
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from fontTools.misc.timeTools import timestampFromString, timestampToString
from . import DefaultTable
# sstruct format for FontForge's 'FFTM' table: a version word plus three
# 64-bit timestamps (FontForge build time, source created, source modified).
FFTMFormat = """
> # big endian
version: I
FFTimeStamp: Q
sourceCreated: Q
sourceModified: Q
"""
class table_F_F_T_M_(DefaultTable.DefaultTable):
    """FontForge timestamp table ('FFTM').

    Carries the FontForge build timestamp and the creation/modification
    dates of the source file the font was generated from.
    """

    def decompile(self, data, ttFont):
        # Unpack the fixed-size record; trailing bytes (if any) are ignored.
        sstruct.unpack2(FFTMFormat, data, self)

    def compile(self, ttFont):
        return sstruct.pack(FFTMFormat, self)

    def toXML(self, writer, ttFont):
        writer.comment("FontForge's timestamp, font source creation and modification dates")
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(FFTMFormat)
        for fieldName in names:
            fieldValue = getattr(self, fieldName)
            if fieldName in ("FFTimeStamp", "sourceCreated", "sourceModified"):
                # Timestamps are serialized human-readably.
                fieldValue = timestampToString(fieldValue)
            writer.simpletag(fieldName, value=fieldValue)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        raw = attrs["value"]
        if name in ("FFTimeStamp", "sourceCreated", "sourceModified"):
            parsed = timestampFromString(raw)
        else:
            parsed = safeEval(raw)
        setattr(self, name, parsed)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/F_F_T_M_.py",
"copies": "5",
"size": "1188",
"license": "apache-2.0",
"hash": -4934979615568756000,
"line_mean": 28.7,
"line_max": 86,
"alpha_frac": 0.7272727273,
"autogenerated": false,
"ratio": 3.3184357541899443,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.026858791073723687,
"num_lines": 40
} |
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from . import DefaultTable
import array
import itertools
import logging
import struct
import sys
import fontTools.ttLib.tables.TupleVariation as tv
log = logging.getLogger(__name__)

# Re-exported for convenience; the actual implementation lives in
# fontTools.ttLib.tables.TupleVariation.
TupleVariation = tv.TupleVariation

# https://www.microsoft.com/typography/otspec/gvar.htm
# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm
#
# Apple's documentation of 'gvar':
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html
#
# FreeType2 source code for parsing 'gvar':
# http://git.savannah.gnu.org/cgit/freetype/freetype2.git/tree/src/truetype/ttgxvar.c

# Fixed-size 'gvar' table header, per the OpenType spec.
GVAR_HEADER_FORMAT = """
> # big endian
version: H
reserved: H
axisCount: H
sharedTupleCount: H
offsetToSharedTuples: I
glyphCount: H
flags: H
offsetToGlyphVariationData: I
"""

GVAR_HEADER_SIZE = sstruct.calcsize(GVAR_HEADER_FORMAT)
class table__g_v_a_r(DefaultTable.DefaultTable):
    """Glyph Variations table ('gvar') of a TrueType variable font.

    ``self.variations`` maps glyph names to lists of TupleVariation
    objects holding per-glyph point deltas.
    """

    dependencies = ["fvar", "glyf"]

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.version, self.reserved = 1, 0
        self.variations = {}

    def compile(self, ttFont):
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        # Tuples used by several glyphs are pooled and referenced by index.
        sharedTuples = tv.compileSharedTuples(
            axisTags, itertools.chain(*self.variations.values()))
        sharedTupleIndices = {coord: i for i, coord in enumerate(sharedTuples)}
        sharedTupleSize = sum([len(c) for c in sharedTuples])
        compiledGlyphs = self.compileGlyphs_(
            ttFont, axisTags, sharedTupleIndices)
        # Per-glyph offset array; one extra trailing entry marks the end
        # of the last glyph's data.
        offset = 0
        offsets = []
        for glyph in compiledGlyphs:
            offsets.append(offset)
            offset += len(glyph)
        offsets.append(offset)
        compiledOffsets, tableFormat = self.compileOffsets_(offsets)
        header = {}
        header["version"] = self.version
        header["reserved"] = self.reserved
        header["axisCount"] = len(axisTags)
        header["sharedTupleCount"] = len(sharedTuples)
        header["offsetToSharedTuples"] = GVAR_HEADER_SIZE + len(compiledOffsets)
        header["glyphCount"] = len(compiledGlyphs)
        header["flags"] = tableFormat  # bit 0: 1 = 32-bit offsets
        header["offsetToGlyphVariationData"] = header["offsetToSharedTuples"] + sharedTupleSize
        compiledHeader = sstruct.pack(GVAR_HEADER_FORMAT, header)
        result = [compiledHeader, compiledOffsets]
        result.extend(sharedTuples)
        result.extend(compiledGlyphs)
        return b''.join(result)

    def compileGlyphs_(self, ttFont, axisTags, sharedCoordIndices):
        # Returns one compiled bytestring per glyph, in glyph order.
        result = []
        glyf = ttFont['glyf']
        for glyphName in ttFont.getGlyphOrder():
            glyph = glyf[glyphName]
            pointCount = self.getNumPoints_(glyph)
            variations = self.variations.get(glyphName, [])
            result.append(compileGlyph_(variations, pointCount,
                                        axisTags, sharedCoordIndices))
        return result

    def decompile(self, data, ttFont):
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        glyphs = ttFont.getGlyphOrder()
        sstruct.unpack(GVAR_HEADER_FORMAT, data[0:GVAR_HEADER_SIZE], self)
        assert len(glyphs) == self.glyphCount
        assert len(axisTags) == self.axisCount
        offsets = self.decompileOffsets_(data[GVAR_HEADER_SIZE:], tableFormat=(self.flags & 1), glyphCount=self.glyphCount)
        sharedCoords = tv.decompileSharedTuples(
            axisTags, self.sharedTupleCount, data, self.offsetToSharedTuples)
        self.variations = {}
        offsetToData = self.offsetToGlyphVariationData
        glyf = ttFont['glyf']
        for i in range(self.glyphCount):
            glyphName = glyphs[i]
            glyph = glyf[glyphName]
            numPointsInGlyph = self.getNumPoints_(glyph)
            # Slice this glyph's variation data out via the offset array.
            gvarData = data[offsetToData + offsets[i] : offsetToData + offsets[i + 1]]
            try:
                self.variations[glyphName] = decompileGlyph_(
                    numPointsInGlyph, sharedCoords, axisTags, gvarData)
            except Exception:
                log.error(
                    "Failed to decompile deltas for glyph '%s' (%d points)",
                    glyphName, numPointsInGlyph,
                )
                raise

    @staticmethod
    def decompileOffsets_(data, tableFormat, glyphCount):
        if tableFormat == 0:
            # Short format: array of UInt16
            offsets = array.array("H")
            offsetsSize = (glyphCount + 1) * 2
        else:
            # Long format: array of UInt32
            offsets = array.array("I")
            offsetsSize = (glyphCount + 1) * 4
        offsets.frombytes(data[0 : offsetsSize])
        if sys.byteorder != "big": offsets.byteswap()
        # In the short format, offsets need to be multiplied by 2.
        # This is not documented in Apple's TrueType specification,
        # but can be inferred from the FreeType implementation, and
        # we could verify it with two sample GX fonts.
        if tableFormat == 0:
            offsets = [off * 2 for off in offsets]
        return offsets

    @staticmethod
    def compileOffsets_(offsets):
        """Packs a list of offsets into a 'gvar' offset table.
        Returns a pair (bytestring, tableFormat). Bytestring is the
        packed offset table. Format indicates whether the table
        uses short (tableFormat=0) or long (tableFormat=1) integers.
        The returned tableFormat should get packed into the flags field
        of the 'gvar' header.
        """
        assert len(offsets) >= 2
        for i in range(1, len(offsets)):
            assert offsets[i - 1] <= offsets[i]
        if max(offsets) <= 0xffff * 2:
            # All offsets are even (glyph data is 2-byte aligned), so the
            # short format stores offset/2.
            packed = array.array("H", [n >> 1 for n in offsets])
            tableFormat = 0
        else:
            packed = array.array("I", offsets)
            tableFormat = 1
        if sys.byteorder != "big": packed.byteswap()
        return (packed.tobytes(), tableFormat)

    def toXML(self, writer, ttFont):
        writer.simpletag("version", value=self.version)
        writer.newline()
        writer.simpletag("reserved", value=self.reserved)
        writer.newline()
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        for glyphName in ttFont.getGlyphNames():
            variations = self.variations.get(glyphName)
            if not variations:
                continue
            writer.begintag("glyphVariations", glyph=glyphName)
            writer.newline()
            for gvar in variations:
                gvar.toXML(writer, axisTags)
            writer.endtag("glyphVariations")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":
            self.version = safeEval(attrs["value"])
        elif name == "reserved":
            self.reserved = safeEval(attrs["value"])
        elif name == "glyphVariations":
            if not hasattr(self, "variations"):
                self.variations = {}
            glyphName = attrs["glyph"]
            glyph = ttFont["glyf"][glyphName]
            numPointsInGlyph = self.getNumPoints_(glyph)
            glyphVariations = []
            for element in content:
                if isinstance(element, tuple):
                    name, attrs, content = element
                    if name == "tuple":
                        gvar = TupleVariation({}, [None] * numPointsInGlyph)
                        glyphVariations.append(gvar)
                        for tupleElement in content:
                            if isinstance(tupleElement, tuple):
                                tupleName, tupleAttrs, tupleContent = tupleElement
                                gvar.fromXML(tupleName, tupleAttrs, tupleContent)
            self.variations[glyphName] = glyphVariations

    @staticmethod
    def getNumPoints_(glyph):
        # Deltas also cover the four phantom points (h/v origin and advance).
        NUM_PHANTOM_POINTS = 4
        if glyph.isComposite():
            return len(glyph.components) + NUM_PHANTOM_POINTS
        else:
            # Empty glyphs (eg. space, nonmarkingreturn) have no "coordinates" attribute.
            return len(getattr(glyph, "coordinates", [])) + NUM_PHANTOM_POINTS
def compileGlyph_(variations, pointCount, axisTags, sharedCoordIndices):
    """Serialize the variation data of a single glyph.

    Returns b"" when the glyph has no variations; otherwise the
    GlyphVariationData record, padded to an even length.
    """
    tupleVariationCount, tuples, data = tv.compileTupleVariationStore(
        variations, pointCount, axisTags, sharedCoordIndices)
    if tupleVariationCount == 0:
        return b""
    # Header: tuple count, then offset (4 + tuples length) to the
    # serialized point/delta data.
    header = struct.pack(">HH", tupleVariationCount, 4 + len(tuples))
    padding = b"\0" if (len(tuples) + len(data)) % 2 != 0 else b""
    return header + tuples + data + padding
def decompileGlyph_(pointCount, sharedTuples, axisTags, data):
    """Parse the variation data of a single glyph.

    Returns a list of TupleVariation objects; an empty list when the
    glyph has no variation data (record shorter than its 4-byte header).
    """
    if len(data) < 4:
        return []
    tupleVariationCount, offsetToData = struct.unpack(">HH", data[:4])
    # (The previous version bound `dataPos = offsetToData` and never used
    # it; the dead local has been removed.)
    return tv.decompileTupleVariationStore(
        "gvar", axisTags,
        tupleVariationCount, pointCount,
        sharedTuples, data, 4, offsetToData
    )
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_g_v_a_r.py",
"copies": "4",
"size": "7809",
"license": "apache-2.0",
"hash": 7218267838003962000,
"line_mean": 31.9493670886,
"line_max": 117,
"alpha_frac": 0.7155845819,
"autogenerated": false,
"ratio": 3.18734693877551,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03249161212455374,
"num_lines": 237
} |
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from . import DefaultTable
import array
import sys
# sstruct format for the Graphite 'Gloc' table header. The text after '#'
# inside the format string is ignored by sstruct.
Gloc_header = '''
> # big endian
version: 16.16F # Table version
flags: H # bit 0: 1=long format, 0=short format
# bit 1: 1=attribute names, 0=no names
numAttribs: H # NUmber of attributes
'''
class table_G__l_o_c(DefaultTable.DefaultTable):
    """
    Support Graphite Gloc tables
    """

    dependencies = ['Glat']

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.attribIds = None
        self.numAttribs = 0

    def decompile(self, data, ttFont):
        _, data = sstruct.unpack2(Gloc_header, data, self)
        flags = self.flags
        del self.flags
        # flags bit 0 selects 32-bit ('I') vs 16-bit ('H') location entries.
        self.locations = array.array('I' if flags & 1 else 'H')
        # If flags bit 1 is set, the last numAttribs*2 bytes hold attribute
        # ids; (flags & 2) is then exactly 2, so this strips that tail.
        self.locations.frombytes(data[:len(data) - self.numAttribs * (flags & 2)])
        if sys.byteorder != "big": self.locations.byteswap()
        self.attribIds = array.array('H')
        if flags & 2:
            self.attribIds.frombytes(data[-self.numAttribs * 2:])
            if sys.byteorder != "big": self.attribIds.byteswap()

    def compile(self, ttFont):
        data = sstruct.pack(Gloc_header, dict(version=1.0,
            flags=(bool(self.attribIds) << 1) + (self.locations.typecode == 'I'),
            numAttribs=self.numAttribs))
        # Swap to big-endian for output, then swap back so the in-memory
        # arrays remain usable afterwards.
        if sys.byteorder != "big": self.locations.byteswap()
        data += self.locations.tobytes()
        if sys.byteorder != "big": self.locations.byteswap()
        if self.attribIds:
            if sys.byteorder != "big": self.attribIds.byteswap()
            data += self.attribIds.tobytes()
            if sys.byteorder != "big": self.attribIds.byteswap()
        return data

    def set(self, locations):
        # 16-bit entries can only address offsets below 0x10000.
        long_format = max(locations) >= 65536
        self.locations = array.array('I' if long_format else 'H', locations)

    def toXML(self, writer, ttFont):
        writer.simpletag("attributes", number=self.numAttribs)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == 'attributes':
            self.numAttribs = int(safeEval(attrs['number']))

    def __getitem__(self, index):
        return self.locations[index]

    def __len__(self):
        return len(self.locations)

    def __iter__(self):
        return iter(self.locations)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/G__l_o_c.py",
"copies": "5",
"size": "2444",
"license": "apache-2.0",
"hash": -8539774471890281000,
"line_mean": 33.4225352113,
"line_max": 85,
"alpha_frac": 0.5981996727,
"autogenerated": false,
"ratio": 3.708649468892261,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6806849141592262,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from . import DefaultTable
# sstruct format for a version-0.5 'maxp' table (CFF-flavoured fonts).
maxpFormat_0_5 = """
> # big endian
tableVersion: i
numGlyphs: H
"""

# Additional fields appended for a version-1.0 'maxp' table
# (TrueType-flavoured fonts with a 'glyf' table).
maxpFormat_1_0_add = """
> # big endian
maxPoints: H
maxContours: H
maxCompositePoints: H
maxCompositeContours: H
maxZones: H
maxTwilightPoints: H
maxStorage: H
maxFunctionDefs: H
maxInstructionDefs: H
maxStackElements: H
maxSizeOfInstructions: H
maxComponentElements: H
maxComponentDepth: H
"""
class table__m_a_x_p(DefaultTable.DefaultTable):
    """Maximum Profile table ('maxp').

    Version 0.5 carries only numGlyphs; version 1.0 additionally carries
    the TrueType resource limits (points, contours, storage, etc.).
    """

    dependencies = ['glyf']

    def decompile(self, data, ttFont):
        dummy, data = sstruct.unpack2(maxpFormat_0_5, data, self)
        self.numGlyphs = int(self.numGlyphs)
        if self.tableVersion != 0x00005000:
            # Version 1.0: the extra fields follow the 0.5 header.
            dummy, data = sstruct.unpack2(maxpFormat_1_0_add, data, self)
        assert len(data) == 0

    def compile(self, ttFont):
        if 'glyf' in ttFont:
            if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes:
                self.recalc(ttFont)
        else:
            pass  # CFF
        self.numGlyphs = len(ttFont.getGlyphOrder())
        if self.tableVersion != 0x00005000:
            self.tableVersion = 0x00010000
        data = sstruct.pack(maxpFormat_0_5, self)
        if self.tableVersion == 0x00010000:
            data = data + sstruct.pack(maxpFormat_1_0_add, self)
        return data

    def recalc(self, ttFont):
        """Recalculate the font bounding box, and most other maxp values except
        for the TT instructions values. Also recalculate the value of bit 1
        of the flags field and the font bounding box of the 'head' table.
        """
        glyfTable = ttFont['glyf']
        hmtxTable = ttFont['hmtx']
        headTable = ttFont['head']
        self.numGlyphs = len(glyfTable)
        INFINITY = 100000
        xMin = +INFINITY
        yMin = +INFINITY
        xMax = -INFINITY
        yMax = -INFINITY
        maxPoints = 0
        maxContours = 0
        maxCompositePoints = 0
        maxCompositeContours = 0
        maxComponentElements = 0
        maxComponentDepth = 0
        # head flags bit 1: left sidebearing point at x == xMin for every glyph
        allXMinIsLsb = 1
        for glyphName in ttFont.getGlyphOrder():
            g = glyfTable[glyphName]
            if g.numberOfContours:
                if hmtxTable[glyphName][1] != g.xMin:
                    allXMinIsLsb = 0
                xMin = min(xMin, g.xMin)
                yMin = min(yMin, g.yMin)
                xMax = max(xMax, g.xMax)
                yMax = max(yMax, g.yMax)
                if g.numberOfContours > 0:
                    # Simple glyph.
                    nPoints, nContours = g.getMaxpValues()
                    maxPoints = max(maxPoints, nPoints)
                    maxContours = max(maxContours, nContours)
                else:
                    # Composite glyph.
                    nPoints, nContours, componentDepth = g.getCompositeMaxpValues(glyfTable)
                    maxCompositePoints = max(maxCompositePoints, nPoints)
                    maxCompositeContours = max(maxCompositeContours, nContours)
                    maxComponentElements = max(maxComponentElements, len(g.components))
                    maxComponentDepth = max(maxComponentDepth, componentDepth)
        if xMin == +INFINITY:
            # No glyph had outlines at all.
            headTable.xMin = 0
            headTable.yMin = 0
            headTable.xMax = 0
            headTable.yMax = 0
        else:
            headTable.xMin = xMin
            headTable.yMin = yMin
            headTable.xMax = xMax
            headTable.yMax = yMax
        self.maxPoints = maxPoints
        self.maxContours = maxContours
        self.maxCompositePoints = maxCompositePoints
        self.maxCompositeContours = maxCompositeContours
        self.maxComponentElements = maxComponentElements
        self.maxComponentDepth = maxComponentDepth
        if allXMinIsLsb:
            headTable.flags = headTable.flags | 0x2
        else:
            headTable.flags = headTable.flags & ~0x2

    def testrepr(self):
        # Debugging helper: dump the instance dict to stdout.
        items = sorted(self.__dict__.items())
        print(". . . . . . . . .")
        for combo in items:
            print(" %s: %s" % combo)
        print(". . . . . . . . .")

    def toXML(self, writer, ttFont):
        if self.tableVersion != 0x00005000:
            writer.comment("Most of this table will be recalculated by the compiler")
            writer.newline()
        formatstring, names, fixes = sstruct.getformat(maxpFormat_0_5)
        if self.tableVersion != 0x00005000:
            formatstring, names_1_0, fixes = sstruct.getformat(maxpFormat_1_0_add)
            names = names + names_1_0
        for name in names:
            value = getattr(self, name)
            if name == "tableVersion":
                value = hex(value)
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        setattr(self, name, safeEval(attrs["value"]))
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_m_a_x_p.py",
"copies": "5",
"size": "4191",
"license": "apache-2.0",
"hash": 4595524123746714000,
"line_mean": 29.3695652174,
"line_max": 77,
"alpha_frac": 0.6848007635,
"autogenerated": false,
"ratio": 2.878434065934066,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03276326356124329,
"num_lines": 138
} |
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from .sbixGlyph import Glyph
import struct
# sstruct format for the per-strike header of an 'sbix' table.
sbixStrikeHeaderFormat = """
>
ppem: H # The PPEM for which this strike was designed (e.g., 9,
# 12, 24)
resolution: H # The screen resolution (in dpi) for which this strike
# was designed (e.g., 72)
"""

# One entry of the glyph-data offset array that follows the strike header.
sbixGlyphDataOffsetFormat = """
>
glyphDataOffset: L # Offset from the beginning of the strike data record
# to data for the individual glyph
"""

sbixStrikeHeaderFormatSize = sstruct.calcsize(sbixStrikeHeaderFormat)
sbixGlyphDataOffsetFormatSize = sstruct.calcsize(sbixGlyphDataOffsetFormat)
class Strike(object):
    """One bitmap strike of an 'sbix' table.

    A strike holds the bitmaps for a single (ppem, resolution)
    combination, keyed by glyph name in ``self.glyphs``.
    """

    def __init__(self, rawdata=None, ppem=0, resolution=72):
        self.data = rawdata
        self.ppem = ppem
        self.resolution = resolution
        self.glyphs = {}

    def decompile(self, ttFont):
        if self.data is None:
            from fontTools import ttLib
            raise ttLib.TTLibError
        if len(self.data) < sbixStrikeHeaderFormatSize:
            from fontTools import ttLib
            # BUGFIX: the original wrote
            #     raise(ttLib.TTLibError, "msg") % (...)
            # which applies '%' to a (class, str) tuple and raises a
            # TypeError instead of a TTLibError with a formatted message.
            raise ttLib.TTLibError(
                "Strike header too short: Expected %x, got %x."
                % (sbixStrikeHeaderFormatSize, len(self.data)))
        # read Strike header from raw data
        sstruct.unpack(sbixStrikeHeaderFormat, self.data[:sbixStrikeHeaderFormatSize], self)
        # calculate number of glyphs from the first offset: everything
        # between the header and the first glyph record is the offset array
        firstGlyphDataOffset, = struct.unpack(">L",
            self.data[sbixStrikeHeaderFormatSize:sbixStrikeHeaderFormatSize + sbixGlyphDataOffsetFormatSize])
        self.numGlyphs = (firstGlyphDataOffset - sbixStrikeHeaderFormatSize) // sbixGlyphDataOffsetFormatSize - 1
        # ^ -1 because there's one more offset than glyphs
        # build offset list for single glyph data offsets
        self.glyphDataOffsets = []
        for i in range(self.numGlyphs + 1):  # + 1 because there's one more offset than glyphs
            start = i * sbixGlyphDataOffsetFormatSize + sbixStrikeHeaderFormatSize
            current_offset, = struct.unpack(">L", self.data[start:start + sbixGlyphDataOffsetFormatSize])
            self.glyphDataOffsets.append(current_offset)
        # iterate through offset list and slice raw data into glyph data records
        for i in range(self.numGlyphs):
            current_glyph = Glyph(rawdata=self.data[self.glyphDataOffsets[i]:self.glyphDataOffsets[i+1]], gid=i)
            current_glyph.decompile(ttFont)
            self.glyphs[current_glyph.glyphName] = current_glyph
        # raw parsing state is no longer needed
        del self.glyphDataOffsets
        del self.numGlyphs
        del self.data

    def compile(self, ttFont):
        self.glyphDataOffsets = b""
        self.bitmapData = b""
        glyphOrder = ttFont.getGlyphOrder()
        # first glyph starts right after the header and the offset array
        currentGlyphDataOffset = sbixStrikeHeaderFormatSize + sbixGlyphDataOffsetFormatSize * (len(glyphOrder) + 1)
        for glyphName in glyphOrder:
            if glyphName in self.glyphs:
                # we have glyph data for this glyph
                current_glyph = self.glyphs[glyphName]
            else:
                # must add empty glyph data record for this glyph
                current_glyph = Glyph(glyphName=glyphName)
            current_glyph.compile(ttFont)
            current_glyph.glyphDataOffset = currentGlyphDataOffset
            self.bitmapData += current_glyph.rawdata
            currentGlyphDataOffset += len(current_glyph.rawdata)
            self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, current_glyph)
        # add last "offset", really the end address of the last glyph data record
        dummy = Glyph()
        dummy.glyphDataOffset = currentGlyphDataOffset
        self.glyphDataOffsets += sstruct.pack(sbixGlyphDataOffsetFormat, dummy)
        # pack header
        self.data = sstruct.pack(sbixStrikeHeaderFormat, self)
        # add offsets and image data after header
        self.data += self.glyphDataOffsets + self.bitmapData

    def toXML(self, xmlWriter, ttFont):
        xmlWriter.begintag("strike")
        xmlWriter.newline()
        xmlWriter.simpletag("ppem", value=self.ppem)
        xmlWriter.newline()
        xmlWriter.simpletag("resolution", value=self.resolution)
        xmlWriter.newline()
        glyphOrder = ttFont.getGlyphOrder()
        for i in range(len(glyphOrder)):
            if glyphOrder[i] in self.glyphs:
                self.glyphs[glyphOrder[i]].toXML(xmlWriter, ttFont)
                # TODO: what if there are more glyph data records than (glyf table) glyphs?
        xmlWriter.endtag("strike")
        xmlWriter.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name in ["ppem", "resolution"]:
            setattr(self, name, safeEval(attrs["value"]))
        elif name == "glyph":
            if "graphicType" in attrs:
                myFormat = safeEval("'''" + attrs["graphicType"] + "'''")
            else:
                myFormat = None
            if "glyphname" in attrs:
                myGlyphName = safeEval("'''" + attrs["glyphname"] + "'''")
            elif "name" in attrs:
                myGlyphName = safeEval("'''" + attrs["name"] + "'''")
            else:
                from fontTools import ttLib
                raise ttLib.TTLibError("Glyph must have a glyph name.")
            if "originOffsetX" in attrs:
                myOffsetX = safeEval(attrs["originOffsetX"])
            else:
                myOffsetX = 0
            if "originOffsetY" in attrs:
                myOffsetY = safeEval(attrs["originOffsetY"])
            else:
                myOffsetY = 0
            current_glyph = Glyph(
                glyphName=myGlyphName,
                graphicType=myFormat,
                originOffsetX=myOffsetX,
                originOffsetY=myOffsetY,
            )
            for element in content:
                if isinstance(element, tuple):
                    name, attrs, content = element
                    current_glyph.fromXML(name, attrs, content, ttFont)
                    current_glyph.compile(ttFont)
            self.glyphs[current_glyph.glyphName] = current_glyph
        else:
            from fontTools import ttLib
            raise ttLib.TTLibError("can't handle '%s' element" % name)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/sbixStrike.py",
"copies": "5",
"size": "5381",
"license": "apache-2.0",
"hash": 5304069938728959000,
"line_mean": 35.3581081081,
"line_max": 109,
"alpha_frac": 0.7277457722,
"autogenerated": false,
"ratio": 3.2357185808779314,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6463464353077931,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval, num2binary, binary2num
from . import DefaultTable
from .sbixStrike import Strike
# sstruct format for the 'sbix' table header.
sbixHeaderFormat = """
>
version: H # Version number (set to 1)
flags: H # The only two bits used in the flags field are bits 0
# and 1. For historical reasons, bit 0 must always be 1.
# Bit 1 is a sbixDrawOutlines flag and is interpreted as
# follows:
# 0: Draw only 'sbix' bitmaps
# 1: Draw both 'sbix' bitmaps and outlines, in that
# order
numStrikes: L # Number of bitmap strikes to follow
"""
sbixHeaderFormatSize = sstruct.calcsize(sbixHeaderFormat)

# One entry of the strike offset array that follows the table header.
sbixStrikeOffsetFormat = """
>
strikeOffset: L # Offset from begining of table to data for the
# individual strike
"""
sbixStrikeOffsetFormatSize = sstruct.calcsize(sbixStrikeOffsetFormat)
class table__s_b_i_x(DefaultTable.DefaultTable):
    """Standard Bitmap Graphics table ('sbix').

    ``self.strikes`` maps ppem values to Strike objects.
    """

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.version = 1
        self.flags = 1
        self.numStrikes = 0
        self.strikes = {}
        self.strikeOffsets = []

    def decompile(self, data, ttFont):
        # read table header
        sstruct.unpack(sbixHeaderFormat, data[ : sbixHeaderFormatSize], self)
        # collect offsets to individual strikes in self.strikeOffsets
        for i in range(self.numStrikes):
            current_offset = sbixHeaderFormatSize + i * sbixStrikeOffsetFormatSize
            offset_entry = sbixStrikeOffset()
            sstruct.unpack(sbixStrikeOffsetFormat, \
                data[current_offset:current_offset+sbixStrikeOffsetFormatSize], \
                offset_entry)
            self.strikeOffsets.append(offset_entry.strikeOffset)
        # decompile Strikes
        # Walk backwards so each strike's raw data can be sliced off the
        # tail of `data` using only its start offset.
        for i in range(self.numStrikes-1, -1, -1):
            current_strike = Strike(rawdata=data[self.strikeOffsets[i]:])
            data = data[:self.strikeOffsets[i]]
            current_strike.decompile(ttFont)
            #print "    Strike length: %xh" % len(bitmapSetData)
            #print "Number of Glyph entries:", len(current_strike.glyphs)
            if current_strike.ppem in self.strikes:
                from fontTools import ttLib
                raise ttLib.TTLibError("Pixel 'ppem' must be unique for each Strike")
            self.strikes[current_strike.ppem] = current_strike
        # after the glyph data records have been extracted, we don't need the offsets anymore
        del self.strikeOffsets
        del self.numStrikes

    def compile(self, ttFont):
        sbixData = b""
        self.numStrikes = len(self.strikes)
        sbixHeader = sstruct.pack(sbixHeaderFormat, self)
        # calculate offset to start of first strike
        setOffset = sbixHeaderFormatSize + sbixStrikeOffsetFormatSize * self.numStrikes
        for si in sorted(self.strikes.keys()):
            current_strike = self.strikes[si]
            current_strike.compile(ttFont)
            # append offset to this strike to table header
            current_strike.strikeOffset = setOffset
            sbixHeader += sstruct.pack(sbixStrikeOffsetFormat, current_strike)
            setOffset += len(current_strike.data)
            sbixData += current_strike.data
        return sbixHeader + sbixData

    def toXML(self, xmlWriter, ttFont):
        xmlWriter.simpletag("version", value=self.version)
        xmlWriter.newline()
        xmlWriter.simpletag("flags", value=num2binary(self.flags, 16))
        xmlWriter.newline()
        for i in sorted(self.strikes.keys()):
            self.strikes[i].toXML(xmlWriter, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":
            setattr(self, name, safeEval(attrs["value"]))
        elif name == "flags":
            # flags round-trip through XML as a binary string
            setattr(self, name, binary2num(attrs["value"]))
        elif name == "strike":
            current_strike = Strike()
            for element in content:
                if isinstance(element, tuple):
                    name, attrs, content = element
                    current_strike.fromXML(name, attrs, content, ttFont)
            self.strikes[current_strike.ppem] = current_strike
        else:
            from fontTools import ttLib
            raise ttLib.TTLibError("can't handle '%s' element" % name)
# Helper classes

class sbixStrikeOffset(object):
    """Bare record object; ``sstruct.unpack`` fills in its single
    attribute, ``strikeOffset``, while parsing the strike offset array."""
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_s_b_i_x.py",
"copies": "5",
"size": "3877",
"license": "apache-2.0",
"hash": 5220686403355826000,
"line_mean": 32.7130434783,
"line_max": 87,
"alpha_frac": 0.7211761671,
"autogenerated": false,
"ratio": 3.170073589533933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02904520820162383,
"num_lines": 115
} |
from fontTools.misc import transform
from fontParts.base.base import (
BaseObject,
TransformationMixin,
PointPositionMixin,
SelectionMixin,
dynamicProperty,
reference
)
from fontParts.base import normalizers
from fontParts.base.color import Color
from fontParts.base.deprecated import DeprecatedImage, RemovedImage
class BaseImage(
BaseObject,
TransformationMixin,
PointPositionMixin,
SelectionMixin,
DeprecatedImage,
RemovedImage
):
copyAttributes = (
"transformation",
"color",
"data"
)
    def _reprContents(self):
        # Build the pieces of this image's repr: offset, optional color,
        # and (when parented) the parent glyph's own repr contents.
        contents = [
            "offset='({x}, {y})'".format(x=self.offset[0], y=self.offset[1]),
        ]
        if self.color:
            contents.append("color=%r" % str(self.color))
        if self.glyph is not None:
            contents.append("in glyph")
            contents += self.glyph._reprContents()
        return contents
def __bool__(self):
if self.data is None:
return False
elif len(self.data) == 0:
return False
else:
return True
__nonzero__ = __bool__
# -------
# Parents
# -------
# Glyph
_glyph = None
glyph = dynamicProperty("glyph", "The image's parent :class:`BaseGlyph`.")
def _get_glyph(self):
if self._glyph is None:
return None
return self._glyph()
    def _set_glyph(self, glyph):
        # The parent glyph may be set only once; it is stored wrapped via
        # reference() (presumably a weakref-style callable — confirm in
        # fontParts.base.base).
        if self._glyph is not None:
            raise AssertionError("glyph for image already set")
        if glyph is not None:
            glyph = reference(glyph)
        self._glyph = glyph
    # Layer

    layer = dynamicProperty("layer", "The image's parent :class:`BaseLayer`.")

    def _get_layer(self):
        # The layer is reached through the parent glyph, if any.
        if self._glyph is None:
            return None
        return self.glyph.layer

    # Font

    font = dynamicProperty("font", "The image's parent :class:`BaseFont`.")

    def _get_font(self):
        # The font is reached through the parent glyph, if any.
        if self._glyph is None:
            return None
        return self.glyph.font
# ----------
# Attributes
# ----------
# Transformation
transformation = dynamicProperty(
"base_transformation",
"""
The image's :ref:`type-transformation`.
This defines the image's position, scale,
and rotation. ::
>>> image.transformation
(1, 0, 0, 1, 0, 0)
>>> image.transformation = (2, 0, 0, 2, 100, -50)
"""
)
    def _get_base_transformation(self):
        # Public getter: normalize the subclass value before returning it.
        value = self._get_transformation()
        value = normalizers.normalizeTransformationMatrix(value)
        return value

    def _set_base_transformation(self, value):
        # Public setter: normalize before handing off to the subclass.
        value = normalizers.normalizeTransformationMatrix(value)
        self._set_transformation(value)

    def _get_transformation(self):
        """
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()

    def _set_transformation(self, value):
        """
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
offset = dynamicProperty(
"base_offset",
"""
The image's offset. This is a shortcut to the offset
values in :attr:`transformation`. This must be an
iterable containing two :ref:`type-int-float` values
defining the x and y values to offset the image by. ::
>>> image.offset
(0, 0)
>>> image.offset = (100, -50)
"""
)
def _get_base_offset(self):
    """Fetch from the environment getter, then normalize."""
    return normalizers.normalizeTransformationOffset(self._get_offset())
def _set_base_offset(self, value):
    """Normalize, then delegate to the environment setter."""
    self._set_offset(normalizers.normalizeTransformationOffset(value))
def _get_offset(self):
    """
    Subclasses may override this method.
    """
    # The translation lives in the last two transformation members.
    matrix = self.transformation
    return (matrix[4], matrix[5])
def _set_offset(self, value):
    """
    Subclasses may override this method.
    """
    # Rebuild the matrix, replacing only the translation members.
    sx, sxy, syx, sy = self.transformation[:4]
    ox, oy = value
    self.transformation = (sx, sxy, syx, sy, ox, oy)
scale = dynamicProperty(
"base_scale",
"""
The image's scale. This is a shortcut to the scale
values in :attr:`transformation`. This must be an
iterable containing two :ref:`type-int-float` values
defining the x and y values to scale the image by. ::
>>> image.scale
(1, 1)
>>> image.scale = (2, 2)
"""
)
def _get_base_scale(self):
    """Fetch from the environment getter, then normalize."""
    return normalizers.normalizeTransformationScale(self._get_scale())
def _set_base_scale(self, value):
    """Normalize, then delegate to the environment setter."""
    self._set_scale(normalizers.normalizeTransformationScale(value))
def _get_scale(self):
    """
    Subclasses may override this method.
    """
    # The x/y scale factors are the first and fourth matrix members.
    matrix = self.transformation
    return (matrix[0], matrix[3])
def _set_scale(self, value):
    """
    Subclasses may override this method.
    """
    # Rebuild the matrix, replacing only the scale members.
    _, sxy, syx, _, ox, oy = self.transformation
    sx, sy = value
    self.transformation = (sx, sxy, syx, sy, ox, oy)
# Color
color = dynamicProperty(
"base_color",
"""
The image's color. This will be a
:ref:`type-color` or ``None``. ::
>>> image.color
None
>>> image.color = (1, 0, 0, 0.5)
"""
)
def _get_base_color(self):
    """Return a Color wrapper, or None when no color is set."""
    value = self._get_color()
    if value is None:
        return None
    return Color(normalizers.normalizeColor(value))
def _set_base_color(self, value):
    """Normalize non-None values, then delegate to the setter."""
    if value is not None:
        value = normalizers.normalizeColor(value)
    self._set_color(value)
# Environment implementation hooks: concrete wrappers replace these.
def _get_color(self):
    """
    Return the color value as a color tuple or None.
    Subclasses must override this method.
    """
    self.raiseNotImplementedError()
def _set_color(self, value):
    """
    value will be a color tuple or None.
    Subclasses must override this method.
    """
    self.raiseNotImplementedError()
# Data
# NOTE: this must be "base_data" — not "data" — so that attribute access
# is routed through the _get_base_data/_set_base_data wrappers below,
# matching the "base_X" pattern used by every other attribute here.
# With "data", dynamicProperty would dispatch straight to _get_data /
# _set_data and the base wrappers would never run.
data = dynamicProperty(
    "base_data",
    """
    The image's raw byte data. The possible
    formats are defined by each environment.
    """
)
def _get_base_data(self):
    # Pass-through wrapper: byte data has no normalizer.
    return self._get_data()
def _set_base_data(self, value):
    # Pass-through wrapper: byte data has no normalizer.
    self._set_data(value)
# Environment implementation hooks: concrete wrappers replace these.
def _get_data(self):
    """
    This must return raw byte data.
    Subclasses must override this method.
    """
    self.raiseNotImplementedError()
def _set_data(self, value):
    """
    value will be raw byte data.
    Subclasses must override this method.
    """
    self.raiseNotImplementedError()
# --------------
# Transformation
# --------------
def _transformBy(self, matrix, **kwargs):
    """
    Subclasses may override this method.
    """
    # Compose the incoming matrix with the current one and store it.
    composed = transform.Transform(*matrix).transform(self.transformation)
    self.transformation = tuple(composed)
# -------------
# Normalization
# -------------
def round(self):
    """
    Round offset coordinates.
    """
    self._round()
def _round(self):
    """
    Subclasses may override this method.
    """
    # Only the offset is rounded; scale/rotation are untouched.
    self.offset = tuple(
        normalizers.normalizeRounding(v) for v in self.offset)
| {
"repo_name": "robofab-developers/fontParts",
"path": "Lib/fontParts/base/image.py",
"copies": "1",
"size": "7838",
"license": "mit",
"hash": -6608405628462105000,
"line_mean": 24.2838709677,
"line_max": 78,
"alpha_frac": 0.5463128349,
"autogenerated": false,
"ratio": 4.337576092971776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5383888927871776,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc import transform
from fontParts.base.base import (
BaseObject,
TransformationMixin,
PointPositionMixin,
SelectionMixin,
IdentifierMixin,
dynamicProperty,
reference
)
from fontParts.base import normalizers
from fontParts.base.deprecated import DeprecatedPoint, RemovedPoint
class BasePoint(
        BaseObject,
        TransformationMixin,
        PointPositionMixin,
        SelectionMixin,
        IdentifierMixin,
        DeprecatedPoint,
        RemovedPoint
):
    """
    A point object. This object is almost always
    created with :meth:`BaseContour.appendPoint`,
    the pen returned by :meth:`BaseGlyph.getPen`
    or the point pen returned by :meth:`BaseGlyph.getPointPen`.
    An orphan point can be created like this::
        >>> point = RPoint()
    """
    # Attributes transferred by BaseObject's copy machinery.
    copyAttributes = (
        "type",
        "smooth",
        "x",
        "y",
        "name"
    )
    def _reprContents(self):
        # Fragments used by the base __repr__: type, coordinates,
        # then optional name and smooth flags.
        contents = [
            "%s" % self.type,
            ("({x}, {y})".format(x=self.x, y=self.y)),
        ]
        if self.name is not None:
            contents.append("name='%s'" % self.name)
        if self.smooth:
            contents.append("smooth=%r" % self.smooth)
        return contents
    # -------
    # Parents
    # -------
    # Contour
    # _contour holds a weak reference callable (or None).
    _contour = None
    contour = dynamicProperty("contour",
                              "The point's parent :class:`BaseContour`.")
    def _get_contour(self):
        """Resolve the weakly referenced parent contour, or None."""
        if self._contour is None:
            return None
        return self._contour()
    def _set_contour(self, contour):
        """Assign the parent contour; allowed exactly once."""
        if self._contour is not None:
            raise AssertionError("contour for point already set")
        if contour is not None:
            contour = reference(contour)
        self._contour = contour
    # Glyph
    glyph = dynamicProperty("glyph", "The point's parent :class:`BaseGlyph`.")
    def _get_glyph(self):
        # Orphan points (no contour) have no glyph.
        if self._contour is None:
            return None
        return self.contour.glyph
    # Layer
    layer = dynamicProperty("layer", "The point's parent :class:`BaseLayer`.")
    def _get_layer(self):
        # Orphan points (no contour) have no layer.
        if self._contour is None:
            return None
        return self.glyph.layer
    # Font
    font = dynamicProperty("font", "The point's parent :class:`BaseFont`.")
    def _get_font(self):
        # Orphan points (no contour) have no font.
        if self._contour is None:
            return None
        return self.glyph.font
    # ----------
    # Attributes
    # ----------
    # type
    type = dynamicProperty(
        "base_type",
        """
        The point type defined with a :ref:`type-string`.
        The possible types are:
        +----------+---------------------------------+
        | move     | An on-curve move to.            |
        +----------+---------------------------------+
        | line     | An on-curve line to.            |
        +----------+---------------------------------+
        | curve    | An on-curve cubic curve to.     |
        +----------+---------------------------------+
        | qcurve   | An on-curve quadratic curve to. |
        +----------+---------------------------------+
        | offcurve | An off-curve.                   |
        +----------+---------------------------------+
        """)
    def _get_base_type(self):
        # Normalizing wrapper around the environment getter.
        value = self._get_type()
        value = normalizers.normalizePointType(value)
        return value
    def _set_base_type(self, value):
        # Normalize before delegating to the environment setter.
        value = normalizers.normalizePointType(value)
        self._set_type(value)
    def _get_type(self):
        """
        This is the environment implementation
        of :attr:`BasePoint.type`. This must
        return a :ref:`type-string` defining
        the point type.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
    def _set_type(self, value):
        """
        This is the environment implementation
        of :attr:`BasePoint.type`. **value**
        will be a :ref:`type-string` defining
        the point type. It will have been normalized
        with :func:`normalizers.normalizePointType`.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
    # smooth
    smooth = dynamicProperty(
        "base_smooth",
        """
        A ``bool`` indicating if the point is smooth or not. ::
            >>> point.smooth
            False
            >>> point.smooth = True
        """
    )
    def _get_base_smooth(self):
        # Normalizing wrapper around the environment getter.
        value = self._get_smooth()
        value = normalizers.normalizeBoolean(value)
        return value
    def _set_base_smooth(self, value):
        # Normalize before delegating to the environment setter.
        value = normalizers.normalizeBoolean(value)
        self._set_smooth(value)
    def _get_smooth(self):
        """
        This is the environment implementation of
        :attr:`BasePoint.smooth`. This must return
        a ``bool`` indicating the smooth state.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
    def _set_smooth(self, value):
        """
        This is the environment implementation of
        :attr:`BasePoint.smooth`. **value** will
        be a ``bool`` indicating the smooth state.
        It will have been normalized with
        :func:`normalizers.normalizeBoolean`.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
    # x
    x = dynamicProperty(
        "base_x",
        """
        The x coordinate of the point.
        It must be an :ref:`type-int-float`. ::
            >>> point.x
            100
            >>> point.x = 101
        """
    )
    def _get_base_x(self):
        # Normalizing wrapper around the environment getter.
        value = self._get_x()
        value = normalizers.normalizeX(value)
        return value
    def _set_base_x(self, value):
        # Normalize before delegating to the environment setter.
        value = normalizers.normalizeX(value)
        self._set_x(value)
    def _get_x(self):
        """
        This is the environment implementation of
        :attr:`BasePoint.x`. This must return an
        :ref:`type-int-float`.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
    def _set_x(self, value):
        """
        This is the environment implementation of
        :attr:`BasePoint.x`. **value** will be
        an :ref:`type-int-float`.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
    # y
    y = dynamicProperty(
        "base_y",
        """
        The y coordinate of the point.
        It must be an :ref:`type-int-float`. ::
            >>> point.y
            100
            >>> point.y = 101
        """
    )
    def _get_base_y(self):
        # Normalizing wrapper around the environment getter.
        value = self._get_y()
        value = normalizers.normalizeY(value)
        return value
    def _set_base_y(self, value):
        # Normalize before delegating to the environment setter.
        value = normalizers.normalizeY(value)
        self._set_y(value)
    def _get_y(self):
        """
        This is the environment implementation of
        :attr:`BasePoint.y`. This must return an
        :ref:`type-int-float`.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
    def _set_y(self, value):
        """
        This is the environment implementation of
        :attr:`BasePoint.y`. **value** will be
        an :ref:`type-int-float`.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
    # --------------
    # Identification
    # --------------
    # index
    index = dynamicProperty(
        "base_index",
        """
        The index of the point within the ordered
        list of the parent glyph's point. This
        attribute is read only. ::
            >>> point.index
            0
        """
    )
    def _get_base_index(self):
        # Normalizing wrapper around the environment getter.
        value = self._get_index()
        value = normalizers.normalizeIndex(value)
        return value
    def _get_index(self):
        """
        Get the point's index.
        This must return an ``int``.
        Subclasses may override this method.
        """
        contour = self.contour
        if contour is None:
            # Orphan point: no index.
            return None
        return contour.points.index(self)
    # name
    name = dynamicProperty(
        "base_name",
        """
        The name of the point. This will be a
        :ref:`type-string` or ``None``.
            >>> point.name
            'my point'
            >>> point.name = None
        """
    )
    def _get_base_name(self):
        # None is a legal name; only normalize actual strings.
        value = self._get_name()
        if value is not None:
            value = normalizers.normalizePointName(value)
        return value
    def _set_base_name(self, value):
        # None is a legal name; only normalize actual strings.
        if value is not None:
            value = normalizers.normalizePointName(value)
        self._set_name(value)
    def _get_name(self):
        """
        This is the environment implementation of
        :attr:`BasePoint.name`. This must return a
        :ref:`type-string` or ``None``. The returned
        value will be normalized with
        :func:`normalizers.normalizePointName`.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
    def _set_name(self, value):
        """
        This is the environment implementation of
        :attr:`BasePoint.name`. **value** will be
        a :ref:`type-string` or ``None``. It will
        have been normalized with
        :func:`normalizers.normalizePointName`.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
    # --------------
    # Transformation
    # --------------
    def _transformBy(self, matrix, **kwargs):
        """
        This is the environment implementation of
        :meth:`BasePoint.transformBy`.
        **matrix** will be a :ref:`type-transformation`.
        that has been normalized with
        :func:`normalizers.normalizeTransformationMatrix`.
        Subclasses may override this method.
        """
        t = transform.Transform(*matrix)
        x, y = t.transformPoint((self.x, self.y))
        self.x = x
        self.y = y
    # -------------
    # Normalization
    # -------------
    def round(self):
        """
        Round the point's coordinate.
        >>> point.round()
        This applies to the following:
        * x
        * y
        """
        self._round()
    def _round(self, **kwargs):
        """
        This is the environment implementation of
        :meth:`BasePoint.round`.
        Subclasses may override this method.
        """
        self.x = normalizers.normalizeRounding(self.x)
        self.y = normalizers.normalizeRounding(self.y)
| {
"repo_name": "robofab-developers/fontParts",
"path": "Lib/fontParts/base/point.py",
"copies": "1",
"size": "10725",
"license": "mit",
"hash": 7361228647517777000,
"line_mean": 24.4750593824,
"line_max": 78,
"alpha_frac": 0.526993007,
"autogenerated": false,
"ratio": 4.523407844791227,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 421
} |
from fontTools.misc import transform
from fontParts.base.base import (
BaseObject,
TransformationMixin,
SelectionMixin,
IdentifierMixin,
dynamicProperty,
reference
)
from fontParts.base.errors import FontPartsError
from fontParts.base import normalizers
from fontParts.base.deprecated import DeprecatedBPoint, RemovedBPoint
class BaseBPoint(
        BaseObject,
        TransformationMixin,
        SelectionMixin,
        DeprecatedBPoint,
        IdentifierMixin,
        RemovedBPoint
):
    """
    A bPoint object. A bPoint wraps a single on-curve point of a
    contour and presents it as an ``anchor`` position with incoming
    (``bcpIn``) and outgoing (``bcpOut``) off-curve handles. These
    objects are generated dynamically by the parent contour and
    should not be constructed or stored directly.
    """
    def _reprContents(self):
        # Fragments used by the base __repr__: type and anchor position.
        contents = [
            "%s" % self.type,
            "anchor='({x}, {y})'".format(x=self.anchor[0], y=self.anchor[1]),
        ]
        return contents
    def _setPoint(self, point):
        # The wrapped on-curve point may be assigned exactly once.
        if hasattr(self, "_point"):
            raise AssertionError("point for bPoint already set")
        self._point = point
    def __eq__(self, other):
        # bPoints are equal when they wrap the same underlying point.
        if hasattr(other, "_point"):
            return self._point == other._point
        return NotImplemented
    # this class should not be used in hashable
    # collections since it is dynamically generated.
    __hash__ = None
    # -------
    # Parents
    # -------
    # identifier
    def _get_identifier(self):
        """
        Subclasses may override this method.
        """
        # Delegate to the wrapped point's identifier.
        return self._point.identifier
    def _getIdentifier(self):
        """
        Subclasses may override this method.
        """
        # Generate (if needed) and return the wrapped point's identifier.
        return self._point.getIdentifier()
    # Segment
    _segment = dynamicProperty("base_segment")
    def _get_base_segment(self):
        # The segment whose on-curve point is the wrapped point.
        # Returns None implicitly when no segment matches.
        point = self._point
        for segment in self.contour.segments:
            if segment.onCurve == point:
                return segment
    _nextSegment = dynamicProperty("base_nextSegment")
    def _get_base_nextSegment(self):
        # The segment after this bPoint's segment, wrapping around
        # to the start of the contour.
        contour = self.contour
        if contour is None:
            return None
        segments = contour.segments
        segment = self._segment
        i = segments.index(segment) + 1
        if i >= len(segments):
            i = i % len(segments)
        nextSegment = segments[i]
        return nextSegment
    # Contour
    # _contour holds a weak reference callable (or None).
    _contour = None
    contour = dynamicProperty("contour", "The bPoint's parent contour.")
    def _get_contour(self):
        """Resolve the weakly referenced parent contour, or None."""
        if self._contour is None:
            return None
        return self._contour()
    def _set_contour(self, contour):
        """Assign the parent contour; allowed exactly once."""
        if self._contour is not None:
            raise AssertionError("contour for bPoint already set")
        if contour is not None:
            contour = reference(contour)
        self._contour = contour
    # Glyph
    glyph = dynamicProperty("glyph", "The bPoint's parent glyph.")
    def _get_glyph(self):
        if self._contour is None:
            return None
        return self.contour.glyph
    # Layer
    layer = dynamicProperty("layer", "The bPoint's parent layer.")
    def _get_layer(self):
        if self._contour is None:
            return None
        return self.glyph.layer
    # Font
    font = dynamicProperty("font", "The bPoint's parent font.")
    def _get_font(self):
        if self._contour is None:
            return None
        return self.glyph.font
    # ----------
    # Attributes
    # ----------
    # anchor
    anchor = dynamicProperty("base_anchor", "The anchor point.")
    def _get_base_anchor(self):
        # Normalizing wrapper around the environment getter.
        value = self._get_anchor()
        value = normalizers.normalizeCoordinateTuple(value)
        return value
    def _set_base_anchor(self, value):
        value = normalizers.normalizeCoordinateTuple(value)
        self._set_anchor(value)
    def _get_anchor(self):
        """
        Subclasses may override this method.
        """
        point = self._point
        return (point.x, point.y)
    def _set_anchor(self, value):
        """
        Subclasses may override this method.
        """
        # Move the whole bPoint (anchor plus handles) by the delta,
        # so the bcps keep their relative positions.
        pX, pY = self.anchor
        x, y = value
        dX = x - pX
        dY = y - pY
        self.moveBy((dX, dY))
    # bcp in
    bcpIn = dynamicProperty("base_bcpIn", "The incoming off curve.")
    def _get_base_bcpIn(self):
        # Normalizing wrapper around the environment getter.
        value = self._get_bcpIn()
        value = normalizers.normalizeCoordinateTuple(value)
        return value
    def _set_base_bcpIn(self, value):
        value = normalizers.normalizeCoordinateTuple(value)
        self._set_bcpIn(value)
    def _get_bcpIn(self):
        """
        Subclasses may override this method.
        """
        # The incoming handle is the last off-curve of this segment,
        # expressed relative to the anchor; (0, 0) means no handle.
        segment = self._segment
        offCurves = segment.offCurve
        if offCurves:
            bcp = offCurves[-1]
            x, y = relativeBCPIn(self.anchor, (bcp.x, bcp.y))
        else:
            x = y = 0
        return (x, y)
    def _set_bcpIn(self, value):
        """
        Subclasses may override this method.
        """
        x, y = absoluteBCPIn(self.anchor, value)
        segment = self._segment
        if segment.type == "move" and value != (0, 0):
            # The first point of an open contour has no incoming segment.
            raise FontPartsError(("Cannot set the bcpIn for the first "
                                  "point in an open contour.")
                                 )
        else:
            offCurves = segment.offCurve
            if offCurves:
                # if the two off curves are located at the anchor
                # coordinates we can switch to a line segment type.
                if value == (0, 0) and self.bcpOut == (0, 0):
                    segment.type = "line"
                    segment.smooth = False
                else:
                    offCurves[-1].x = x
                    offCurves[-1].y = y
            elif value != (0, 0):
                # Promote the segment to a curve to gain off-curves.
                segment.type = "curve"
                offCurves = segment.offCurve
                offCurves[-1].x = x
                offCurves[-1].y = y
    # bcp out
    bcpOut = dynamicProperty("base_bcpOut", "The outgoing off curve.")
    def _get_base_bcpOut(self):
        # Normalizing wrapper around the environment getter.
        value = self._get_bcpOut()
        value = normalizers.normalizeCoordinateTuple(value)
        return value
    def _set_base_bcpOut(self, value):
        value = normalizers.normalizeCoordinateTuple(value)
        self._set_bcpOut(value)
    def _get_bcpOut(self):
        """
        Subclasses may override this method.
        """
        # The outgoing handle is the first off-curve of the *next*
        # segment, expressed relative to the anchor; (0, 0) means none.
        nextSegment = self._nextSegment
        offCurves = nextSegment.offCurve
        if offCurves:
            bcp = offCurves[0]
            x, y = relativeBCPOut(self.anchor, (bcp.x, bcp.y))
        else:
            x = y = 0
        return (x, y)
    def _set_bcpOut(self, value):
        """
        Subclasses may override this method.
        """
        x, y = absoluteBCPOut(self.anchor, value)
        segment = self._segment
        nextSegment = self._nextSegment
        if nextSegment.type == "move" and value != (0, 0):
            # The last point of an open contour has no outgoing segment.
            raise FontPartsError(("Cannot set the bcpOut for the last "
                                  "point in an open contour.")
                                 )
        else:
            offCurves = nextSegment.offCurve
            if offCurves:
                # if the off curves are located at the anchor coordinates
                # we can switch to a "line" segment type
                if value == (0, 0) and self.bcpIn == (0, 0):
                    segment.type = "line"
                    segment.smooth = False
                else:
                    offCurves[0].x = x
                    offCurves[0].y = y
            elif value != (0, 0):
                # Promote the next segment to a curve to gain off-curves.
                nextSegment.type = "curve"
                offCurves = nextSegment.offCurve
                offCurves[0].x = x
                offCurves[0].y = y
    # type
    type = dynamicProperty("base_type", "The bPoint type.")
    def _get_base_type(self):
        value = self._get_type()
        value = normalizers.normalizeBPointType(value)
        return value
    def _set_base_type(self, value):
        value = normalizers.normalizeBPointType(value)
        self._set_type(value)
    def _get_type(self):
        """
        Subclasses may override this method.
        """
        # Map the wrapped point's type/smooth state to a bPoint type
        # ("curve" or "corner"); off-curve points cannot be bPoints.
        point = self._point
        typ = point.type
        bType = None
        if point.smooth:
            if typ == "curve":
                bType = "curve"
            elif typ == "line":
                nextSegment = self._nextSegment
                if nextSegment is not None and nextSegment.type == "curve":
                    bType = "curve"
                else:
                    bType = "corner"
        elif typ in ("move", "line", "curve"):
            bType = "corner"
        if bType is None:
            raise FontPartsError("A %s point can not be converted to a bPoint."
                                 % typ)
        return bType
    def _set_type(self, value):
        """
        Subclasses may override this method.
        """
        point = self._point
        # convert corner to curve
        if value == "curve" and point.type == "line":
            # This needs to insert off curves without
            # generating unnecessary points in the
            # following segment. The segment object
            # implements this logic, so delegate the
            # change to the corresponding segment.
            segment = self._segment
            segment.type = "curve"
            segment.smooth = True
        # convert curve to corner
        elif value == "corner" and point.type == "curve":
            point.smooth = False
    # --------------
    # Identification
    # --------------
    # FIX: this must be "base_index" — not "index" — so that access is
    # routed through _get_base_index, which implements the documented
    # "None if the bPoint does not belong to a contour" behavior and
    # normalizes the result. With "index", _get_index was called
    # directly and crashed for orphan bPoints.
    index = dynamicProperty("base_index",
                            ("The index of the bPoint within the ordered "
                             "list of the parent contour's bPoints. None "
                             "if the bPoint does not belong to a contour.")
                            )
    def _get_base_index(self):
        if self.contour is None:
            return None
        value = self._get_index()
        value = normalizers.normalizeIndex(value)
        return value
    def _get_index(self):
        """
        Subclasses may override this method.
        """
        contour = self.contour
        value = contour.bPoints.index(self)
        return value
    # --------------
    # Transformation
    # --------------
    def _transformBy(self, matrix, **kwargs):
        """
        Subclasses may override this method.
        """
        # Transform anchor and both handles in absolute space, then
        # store the handles back as anchor-relative values.
        anchor = self.anchor
        bcpIn = absoluteBCPIn(anchor, self.bcpIn)
        bcpOut = absoluteBCPOut(anchor, self.bcpOut)
        points = [bcpIn, anchor, bcpOut]
        t = transform.Transform(*matrix)
        bcpIn, anchor, bcpOut = t.transformPoints(points)
        x, y = anchor
        self._point.x = x
        self._point.y = y
        self.bcpIn = relativeBCPIn(anchor, bcpIn)
        self.bcpOut = relativeBCPOut(anchor, bcpOut)
    # ----
    # Misc
    # ----
    def round(self):
        """
        Round coordinates.
        """
        x, y = self.anchor
        self.anchor = (normalizers.normalizeRounding(x),
                       normalizers.normalizeRounding(y))
        x, y = self.bcpIn
        self.bcpIn = (normalizers.normalizeRounding(x),
                      normalizers.normalizeRounding(y))
        x, y = self.bcpOut
        self.bcpOut = (normalizers.normalizeRounding(x),
                       normalizers.normalizeRounding(y))
def relativeBCPIn(anchor, BCPIn):
    """Convert an absolute incoming bcp value to one relative to *anchor*."""
    ax, ay = anchor
    bx, by = BCPIn
    return (bx - ax, by - ay)
def absoluteBCPIn(anchor, BCPIn):
    """Convert an anchor-relative incoming bcp value to an absolute one."""
    ax, ay = anchor
    dx, dy = BCPIn
    return (dx + ax, dy + ay)
def relativeBCPOut(anchor, BCPOut):
    """Convert an absolute outgoing bcp value to one relative to *anchor*."""
    ax, ay = anchor
    bx, by = BCPOut
    return (bx - ax, by - ay)
def absoluteBCPOut(anchor, BCPOut):
    """Convert an anchor-relative outgoing bcp value to an absolute one."""
    ax, ay = anchor
    dx, dy = BCPOut
    return (dx + ax, dy + ay)
| {
"repo_name": "robofab-developers/fontParts",
"path": "Lib/fontParts/base/bPoint.py",
"copies": "1",
"size": "12012",
"license": "mit",
"hash": 9082269576670819000,
"line_mean": 28.2262773723,
"line_max": 79,
"alpha_frac": 0.5357975358,
"autogenerated": false,
"ratio": 4.2221441124780315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00005068937550689375,
"num_lines": 411
} |
from fontTools.misc import transform
from fontParts.base import normalizers
from fontParts.base.base import (
BaseObject, TransformationMixin, InterpolationMixin, SelectionMixin,
PointPositionMixin, IdentifierMixin, dynamicProperty, reference
)
from fontParts.base.compatibility import AnchorCompatibilityReporter
from fontParts.base.color import Color
from fontParts.base.deprecated import DeprecatedAnchor, RemovedAnchor
class BaseAnchor(
        BaseObject,
        TransformationMixin,
        DeprecatedAnchor,
        RemovedAnchor,
        PointPositionMixin,
        InterpolationMixin,
        SelectionMixin,
        IdentifierMixin
):
    """
    An anchor object. This object is almost always
    created with :meth:`BaseGlyph.appendAnchor`.
    An orphan anchor can be created like this::
        >>> anchor = RAnchor()
    """
    def _reprContents(self):
        # Fragments used by the base __repr__: position, then the
        # optional name and color.
        contents = [
            ("({x}, {y})".format(x=self.x, y=self.y)),
        ]
        if self.name is not None:
            contents.append("name='%s'" % self.name)
        if self.color:
            contents.append("color=%r" % str(self.color))
        return contents
    # ----
    # Copy
    # ----
    # Attributes transferred by BaseObject's copy machinery.
    copyAttributes = (
        "x",
        "y",
        "name",
        "color"
    )
    # -------
    # Parents
    # -------
    # Glyph
    # _glyph holds a weak reference callable (or None).
    _glyph = None
    glyph = dynamicProperty("glyph", "The anchor's parent :class:`BaseGlyph`.")
    def _get_glyph(self):
        """Resolve the weakly referenced parent glyph, or None."""
        if self._glyph is None:
            return None
        return self._glyph()
    def _set_glyph(self, glyph):
        """Assign the parent glyph; allowed exactly once."""
        if self._glyph is not None:
            raise AssertionError("glyph for anchor already set")
        if glyph is not None:
            glyph = reference(glyph)
        self._glyph = glyph
    # Layer
    layer = dynamicProperty("layer", "The anchor's parent :class:`BaseLayer`.")
    def _get_layer(self):
        if self._glyph is None:
            return None
        return self.glyph.layer
    # Font
    font = dynamicProperty("font", "The anchor's parent :class:`BaseFont`.")
    def _get_font(self):
        if self._glyph is None:
            return None
        return self.glyph.font
    # --------
    # Position
    # --------
    # x
    x = dynamicProperty(
        "base_x",
        """
        The x coordinate of the anchor.
        It must be an :ref:`type-int-float`. ::
            >>> anchor.x
            100
            >>> anchor.x = 101
        """
    )
    def _get_base_x(self):
        # Normalizing wrapper around the environment getter.
        value = self._get_x()
        value = normalizers.normalizeX(value)
        return value
    def _set_base_x(self, value):
        # Normalize before delegating to the environment setter.
        value = normalizers.normalizeX(value)
        self._set_x(value)
    def _get_x(self):
        """
        This is the environment implementation of
        :attr:`BaseAnchor.x`. This must return an
        :ref:`type-int-float`.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
    def _set_x(self, value):
        """
        This is the environment implementation of
        :attr:`BaseAnchor.x`. **value** will be
        an :ref:`type-int-float`.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
    # y
    y = dynamicProperty(
        "base_y",
        """
        The y coordinate of the anchor.
        It must be an :ref:`type-int-float`. ::
            >>> anchor.y
            100
            >>> anchor.y = 101
        """
    )
    def _get_base_y(self):
        # Normalizing wrapper around the environment getter.
        value = self._get_y()
        value = normalizers.normalizeY(value)
        return value
    def _set_base_y(self, value):
        # Normalize before delegating to the environment setter.
        value = normalizers.normalizeY(value)
        self._set_y(value)
    def _get_y(self):
        """
        This is the environment implementation of
        :attr:`BaseAnchor.y`. This must return an
        :ref:`type-int-float`.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
    def _set_y(self, value):
        """
        This is the environment implementation of
        :attr:`BaseAnchor.y`. **value** will be
        an :ref:`type-int-float`.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
    # --------------
    # Identification
    # --------------
    # index
    index = dynamicProperty(
        "base_index",
        """
        The index of the anchor within the ordered
        list of the parent glyph's anchors. This
        attribute is read only. ::
            >>> anchor.index
            0
        """
    )
    def _get_base_index(self):
        # Normalizing wrapper around the environment getter.
        value = self._get_index()
        value = normalizers.normalizeIndex(value)
        return value
    def _get_index(self):
        """
        Get the anchor's index.
        This must return an ``int``.
        Subclasses may override this method.
        """
        glyph = self.glyph
        if glyph is None:
            # Orphan anchor: no index.
            return None
        return glyph.anchors.index(self)
    # name
    name = dynamicProperty(
        "base_name",
        """
        The name of the anchor. This will be a
        :ref:`type-string` or ``None``.
            >>> anchor.name
            'my anchor'
            >>> anchor.name = None
        """
    )
    def _get_base_name(self):
        value = self._get_name()
        # FIX: the property documents None as a legal value, so only
        # normalize actual strings — mirrors BasePoint.name handling.
        if value is not None:
            value = normalizers.normalizeAnchorName(value)
        return value
    def _set_base_name(self, value):
        # FIX: allow clearing the name with None (see property doc);
        # only normalize actual strings.
        if value is not None:
            value = normalizers.normalizeAnchorName(value)
        self._set_name(value)
    def _get_name(self):
        """
        This is the environment implementation of
        :attr:`BaseAnchor.name`. This must return a
        :ref:`type-string` or ``None``. The returned
        value will be normalized with
        :func:`normalizers.normalizeAnchorName`.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
    def _set_name(self, value):
        """
        This is the environment implementation of
        :attr:`BaseAnchor.name`. **value** will be
        a :ref:`type-string` or ``None``. It will
        have been normalized with
        :func:`normalizers.normalizeAnchorName`.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
    # color
    color = dynamicProperty(
        "base_color",
        """
        The anchor's color. This will be a
        :ref:`type-color` or ``None``. ::
            >>> anchor.color
            None
            >>> anchor.color = (1, 0, 0, 0.5)
        """
    )
    def _get_base_color(self):
        # None passes through; real values are normalized and wrapped.
        value = self._get_color()
        if value is not None:
            value = normalizers.normalizeColor(value)
            value = Color(value)
        return value
    def _set_base_color(self, value):
        if value is not None:
            value = normalizers.normalizeColor(value)
        self._set_color(value)
    def _get_color(self):
        """
        This is the environment implementation of
        :attr:`BaseAnchor.color`. This must return
        a :ref:`type-color` or ``None``. The
        returned value will be normalized with
        :func:`normalizers.normalizeColor`.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
    def _set_color(self, value):
        """
        This is the environment implementation of
        :attr:`BaseAnchor.color`. **value** will
        be a :ref:`type-color` or ``None``.
        It will have been normalized with
        :func:`normalizers.normalizeColor`.
        Subclasses must override this method.
        """
        self.raiseNotImplementedError()
    # --------------
    # Transformation
    # --------------
    def _transformBy(self, matrix, **kwargs):
        """
        This is the environment implementation of
        :meth:`BaseAnchor.transformBy`.
        **matrix** will be a :ref:`type-transformation`.
        that has been normalized with
        :func:`normalizers.normalizeTransformationMatrix`.
        Subclasses may override this method.
        """
        t = transform.Transform(*matrix)
        x, y = t.transformPoint((self.x, self.y))
        self.x = x
        self.y = y
    # -------------
    # Interpolation
    # -------------
    compatibilityReporterClass = AnchorCompatibilityReporter
    def isCompatible(self, other):
        """
        Evaluate interpolation compatibility with **other**. ::
            >>> compatible, report = self.isCompatible(otherAnchor)
            >>> compatible
            True
            >>> compatible
            [Warning] Anchor: "left" + "right"
            [Warning] Anchor: "left" has name left | "right" has name right
        This will return a ``bool`` indicating if the anchor is
        compatible for interpolation with **other** and a
        :ref:`type-string` of compatibility notes.
        """
        return super(BaseAnchor, self).isCompatible(other, BaseAnchor)
    def _isCompatible(self, other, reporter):
        """
        This is the environment implementation of
        :meth:`BaseAnchor.isCompatible`.
        Subclasses may override this method.
        """
        anchor1 = self
        anchor2 = other
        # base names
        # A name mismatch is reported as a warning, not an error.
        if anchor1.name != anchor2.name:
            reporter.nameDifference = True
            reporter.warning = True
    # -------------
    # Normalization
    # -------------
    def round(self):
        """
        Round the anchor's coordinate.
        >>> anchor.round()
        This applies to the following:
        * x
        * y
        """
        self._round()
    def _round(self):
        """
        This is the environment implementation of
        :meth:`BaseAnchor.round`.
        Subclasses may override this method.
        """
        self.x = normalizers.normalizeRounding(self.x)
        self.y = normalizers.normalizeRounding(self.y)
| {
"repo_name": "robofab-developers/fontParts",
"path": "Lib/fontParts/base/anchor.py",
"copies": "1",
"size": "10042",
"license": "mit",
"hash": 3567398996134753000,
"line_mean": 24.3585858586,
"line_max": 79,
"alpha_frac": 0.5468034256,
"autogenerated": false,
"ratio": 4.489047831917747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5535851257517748,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc import transform
from fontParts.base import normalizers
from fontParts.base.errors import FontPartsError
from fontParts.base.base import (
BaseObject,
TransformationMixin,
InterpolationMixin,
PointPositionMixin,
SelectionMixin,
IdentifierMixin,
dynamicProperty,
reference
)
from fontParts.base.compatibility import ComponentCompatibilityReporter
from fontParts.base.deprecated import DeprecatedComponent, RemovedComponent
class BaseComponent(
        BaseObject,
        TransformationMixin,
        PointPositionMixin,
        InterpolationMixin,
        SelectionMixin,
        IdentifierMixin,
        DeprecatedComponent,
        RemovedComponent
):
    """
    A component object. A component references another glyph
    (``baseGlyph``) and places it with a transformation matrix
    (``transformation``).
    """
    # Attributes transferred by BaseObject's copy machinery.
    copyAttributes = (
        "baseGlyph",
        "transformation"
    )
def _reprContents(self):
    """Build the fragments shown in this component's repr."""
    ox, oy = self.offset
    parts = [
        "baseGlyph='%s'" % self.baseGlyph,
        "offset='({x}, {y})'".format(x=ox, y=oy),
    ]
    glyph = self.glyph
    if glyph is not None:
        parts.append("in glyph")
        parts.extend(glyph._reprContents())
    return parts
# -------
# Parents
# -------
# Glyph
_glyph = None  # weak reference callable (or None) to the owning glyph
glyph = dynamicProperty("glyph", "The component's parent glyph.")
def _get_glyph(self):
    # Resolve the stored weak reference, if any.
    ref = self._glyph
    return None if ref is None else ref()
def _set_glyph(self, glyph):
    # The parent may be assigned exactly once.
    if self._glyph is not None:
        raise AssertionError("glyph for component already set")
    self._glyph = reference(glyph) if glyph is not None else glyph
# Layer
layer = dynamicProperty("layer", "The component's parent layer.")
def _get_layer(self):
    # Orphan components (no glyph set) have no layer.
    return None if self._glyph is None else self.glyph.layer
# Font
font = dynamicProperty("font", "The component's parent font.")
def _get_font(self):
    # Orphan components (no glyph set) have no font.
    return None if self._glyph is None else self.glyph.font
# ----------
# Attributes
# ----------
# baseGlyph
baseGlyph = dynamicProperty("base_baseGlyph",
                            "The glyph the component references.")
def _get_base_baseGlyph(self):
    # Normalizing wrapper around the environment getter.
    value = self._get_baseGlyph()
    # if the component does not belong to a layer,
    # it is allowed to have None as its baseGlyph
    if value is None and self.layer is None:
        pass
    else:
        value = normalizers.normalizeGlyphName(value)
    return value
def _set_base_baseGlyph(self, value):
    # Normalize before delegating to the environment setter.
    value = normalizers.normalizeGlyphName(value)
    self._set_baseGlyph(value)
def _get_baseGlyph(self):
    """
    Subclasses must override this method.
    """
    self.raiseNotImplementedError()
def _set_baseGlyph(self, value):
    """
    Subclasses must override this method.
    """
    self.raiseNotImplementedError()
# transformation
transformation = dynamicProperty("base_transformation",
                                 "The component's transformation matrix.")
def _get_base_transformation(self):
    # Normalizing wrapper around the environment getter.
    value = self._get_transformation()
    value = normalizers.normalizeTransformationMatrix(value)
    return value
def _set_base_transformation(self, value):
    # Normalize before delegating to the environment setter.
    value = normalizers.normalizeTransformationMatrix(value)
    self._set_transformation(value)
def _get_transformation(self):
    """
    Subclasses must override this method.
    """
    self.raiseNotImplementedError()
def _set_transformation(self, value):
    """
    Subclasses must override this method.
    """
    self.raiseNotImplementedError()
# offset
offset = dynamicProperty("base_offset", "The component's offset.")
def _get_base_offset(self):
value = self._get_offset()
value = normalizers.normalizeTransformationOffset(value)
return value
def _set_base_offset(self, value):
value = normalizers.normalizeTransformationOffset(value)
self._set_offset(value)
def _get_offset(self):
"""
Subclasses may override this method.
"""
sx, sxy, syx, sy, ox, oy = self.transformation
return ox, oy
def _set_offset(self, value):
"""
Subclasses may override this method.
"""
sx, sxy, syx, sy, ox, oy = self.transformation
ox, oy = value
self.transformation = sx, sxy, syx, sy, ox, oy
# scale
scale = dynamicProperty("base_scale", "The component's scale.")
def _get_base_scale(self):
value = self._get_scale()
value = normalizers.normalizeComponentScale(value)
return value
def _set_base_scale(self, value):
value = normalizers.normalizeComponentScale(value)
self._set_scale(value)
def _get_scale(self):
"""
Subclasses may override this method.
"""
sx, sxy, syx, sy, ox, oy = self.transformation
return sx, sy
def _set_scale(self, value):
"""
Subclasses may override this method.
"""
sx, sxy, syx, sy, ox, oy = self.transformation
sx, sy = value
self.transformation = sx, sxy, syx, sy, ox, oy
# --------------
# Identification
# --------------
# index
index = dynamicProperty("base_index",
("The index of the component within the "
"ordered list of the parent glyph's components."))
def _get_base_index(self):
glyph = self.glyph
if glyph is None:
return None
value = self._get_index()
value = normalizers.normalizeIndex(value)
return value
def _set_base_index(self, value):
glyph = self.glyph
if glyph is None:
raise FontPartsError("The component does not belong to a glyph.")
value = normalizers.normalizeIndex(value)
componentCount = len(glyph.components)
if value < 0:
value = -(value % componentCount)
if value >= componentCount:
value = componentCount
self._set_index(value)
def _get_index(self):
"""
Subclasses may override this method.
"""
glyph = self.glyph
return glyph.components.index(self)
def _set_index(self, value):
"""
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# ----
# Pens
# ----
def draw(self, pen):
"""
Draw the component with the given Pen.
"""
self._draw(pen)
def _draw(self, pen, **kwargs):
"""
Subclasses may override this method.
"""
from fontTools.ufoLib.pointPen import PointToSegmentPen
adapter = PointToSegmentPen(pen)
self.drawPoints(adapter)
def drawPoints(self, pen):
"""
Draw the contour with the given PointPen.
"""
self._drawPoints(pen)
def _drawPoints(self, pen, **kwargs):
"""
Subclasses may override this method.
"""
# The try: ... except TypeError: ...
# handles backwards compatibility with
# point pens that have not been upgraded
# to point pen protocol 2.
try:
pen.addComponent(self.baseGlyph, self.transformation,
identifier=self.identifier, **kwargs)
except TypeError:
pen.addComponent(self.baseGlyph, self.transformation, **kwargs)
# --------------
# Transformation
# --------------
def _transformBy(self, matrix, **kwargs):
"""
Subclasses may override this method.
"""
t = transform.Transform(*matrix)
transformation = t.transform(self.transformation)
self.transformation = tuple(transformation)
# -------------
# Normalization
# -------------
def round(self):
"""
Round offset coordinates.
"""
self._round()
def _round(self):
"""
Subclasses may override this method.
"""
x, y = self.offset
x = normalizers.normalizeRounding(x)
y = normalizers.normalizeRounding(y)
self.offset = (x, y)
def decompose(self):
"""
Decompose the component.
"""
glyph = self.glyph
if glyph is None:
raise FontPartsError("The component does not belong to a glyph.")
self._decompose()
def _decompose(self):
"""
Subclasses must override this method.
"""
self.raiseNotImplementedError()
# -------------
# Interpolation
# -------------
compatibilityReporterClass = ComponentCompatibilityReporter
def isCompatible(self, other):
"""
Evaluate interpolation compatibility with **other**. ::
>>> compatible, report = self.isCompatible(otherComponent)
>>> compatible
True
>>> compatible
[Warning] Component: "A" + "B"
[Warning] Component: "A" has name A | "B" has name B
This will return a ``bool`` indicating if the component is
compatible for interpolation with **other** and a
:ref:`type-string` of compatibility notes.
"""
return super(BaseComponent, self).isCompatible(other, BaseComponent)
def _isCompatible(self, other, reporter):
"""
This is the environment implementation of
:meth:`BaseComponent.isCompatible`.
Subclasses may override this method.
"""
component1 = self
component2 = other
# base glyphs
if component1.baseName != component2.baseName:
reporter.baseDifference = True
reporter.warning = True
# ------------
# Data Queries
# ------------
def pointInside(self, point):
"""
Determine if point is in the black or white of the component.
point must be an (x, y) tuple.
"""
point = normalizers.normalizeCoordinateTuple(point)
return self._pointInside(point)
def _pointInside(self, point):
"""
Subclasses may override this method.
"""
from fontTools.pens.pointInsidePen import PointInsidePen
pen = PointInsidePen(glyphSet=self.layer, testPoint=point, evenOdd=False)
self.draw(pen)
return pen.getResult()
bounds = dynamicProperty("base_bounds",
("The bounds of the component: "
"(xMin, yMin, xMax, yMax) or None."))
def _get_base_bounds(self):
value = self._get_bounds()
if value is not None:
value = normalizers.normalizeBoundingBox(value)
return value
def _get_bounds(self):
"""
Subclasses may override this method.
"""
from fontTools.pens.boundsPen import BoundsPen
pen = BoundsPen(self.layer)
self.draw(pen)
return pen.bounds
| {
"repo_name": "robofab-developers/fontParts",
"path": "Lib/fontParts/base/component.py",
"copies": "1",
"size": "11222",
"license": "mit",
"hash": 3147005493123733500,
"line_mean": 26.7772277228,
"line_max": 81,
"alpha_frac": 0.5661201212,
"autogenerated": false,
"ratio": 4.586023702492849,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5652143823692849,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc import xmlWriter
from fontTools.misc.py23 import Tag, byteord, tostr
from fontTools.misc.loggingTools import deprecateArgument
from fontTools.ttLib import TTLibError
from fontTools.ttLib.sfnt import SFNTReader, SFNTWriter
from io import BytesIO, StringIO
import os
import logging
import traceback
log = logging.getLogger(__name__)
class TTFont(object):
	"""The main font object. It manages file input and output, and offers
	a convenient way of accessing tables.
	Tables will be only decompiled when necessary, ie. when they're actually
	accessed. This means that simple operations can be extremely fast.
	"""
	def __init__(self, file=None, res_name_or_index=None,
			sfntVersion="\000\001\000\000", flavor=None, checkChecksums=0,
			verbose=None, recalcBBoxes=True, allowVID=False, ignoreDecompileErrors=False,
			recalcTimestamp=True, fontNumber=-1, lazy=None, quiet=None,
			_tableCache=None):
		"""The constructor can be called with a few different arguments.
		When reading a font from disk, 'file' should be either a pathname
		pointing to a file, or a readable file object.
		If we're running on a Macintosh, 'res_name_or_index' may be an sfnt
		resource name or an sfnt resource index number or zero. The latter
		case will cause TTLib to autodetect whether the file is a flat file
		or a suitcase. (If it's a suitcase, only the first 'sfnt' resource
		will be read!)
		The 'checkChecksums' argument is used to specify how sfnt
		checksums are treated upon reading a file from disk:
			0: don't check (default)
			1: check, print warnings if a wrong checksum is found
			2: check, raise an exception if a wrong checksum is found.
		The TTFont constructor can also be called without a 'file'
		argument: this is the way to create a new empty font.
		In this case you can optionally supply the 'sfntVersion' argument,
		and a 'flavor' which can be None, 'woff', or 'woff2'.
		If the recalcBBoxes argument is false, a number of things will *not*
		be recalculated upon save/compile:
			1) 'glyf' glyph bounding boxes
			2) 'CFF ' font bounding box
			3) 'head' font bounding box
			4) 'hhea' min/max values
			5) 'vhea' min/max values
		(1) is needed for certain kinds of CJK fonts (ask Werner Lemberg ;-).
		Additionally, upon importing an TTX file, this option causes glyphs
		to be compiled right away. This should reduce memory consumption
		greatly, and therefore should have some impact on the time needed
		to parse/compile large fonts.
		If the recalcTimestamp argument is false, the modified timestamp in the
		'head' table will *not* be recalculated upon save/compile.
		If the allowVID argument is set to true, then virtual GID's are
		supported. Asking for a glyph ID with a glyph name or GID that is not in
		the font will return a virtual GID. This is valid for GSUB and cmap
		tables. For SING glyphlets, the cmap table is used to specify Unicode
		values for virtual GI's used in GSUB/GPOS rules. If the gid N is requested
		and does not exist in the font, or the glyphname has the form glyphN
		and does not exist in the font, then N is used as the virtual GID.
		Else, the first virtual GID is assigned as 0x1000 -1; for subsequent new
		virtual GIDs, the next is one less than the previous.
		If ignoreDecompileErrors is set to True, exceptions raised in
		individual tables during decompilation will be ignored, falling
		back to the DefaultTable implementation, which simply keeps the
		binary data.
		If lazy is set to True, many data structures are loaded lazily, upon
		access only. If it is set to False, many data structures are loaded
		immediately. The default is lazy=None which is somewhere in between.
		"""
		# 'verbose' and 'quiet' are deprecated; still honored, but warn.
		for name in ("verbose", "quiet"):
			val = locals().get(name)
			if val is not None:
				deprecateArgument(name, "configure logging instead")
			setattr(self, name, val)
		self.lazy = lazy
		self.recalcBBoxes = recalcBBoxes
		self.recalcTimestamp = recalcTimestamp
		self.tables = {}
		self.reader = None
		# Permit the user to reference glyphs that are not in the font.
		self.last_vid = 0xFFFE # Can't make it be 0xFFFF, as the world is full unsigned short integer counters that get incremented after the last seen GID value.
		self.reverseVIDDict = {}
		self.VIDDict = {}
		self.allowVID = allowVID
		self.ignoreDecompileErrors = ignoreDecompileErrors
		if not file:
			# no input: create a new, empty font
			self.sfntVersion = sfntVersion
			self.flavor = flavor
			self.flavorData = None
			return
		if not hasattr(file, "read"):
			closeStream = True
			# assume file is a string
			if res_name_or_index is not None:
				# see if it contains 'sfnt' resources in the resource or data fork
				from . import macUtils
				if res_name_or_index == 0:
					if macUtils.getSFNTResIndices(file):
						# get the first available sfnt font.
						file = macUtils.SFNTResourceReader(file, 1)
					else:
						file = open(file, "rb")
				else:
					file = macUtils.SFNTResourceReader(file, res_name_or_index)
			else:
				file = open(file, "rb")
		else:
			# assume "file" is a readable file object
			closeStream = False
			file.seek(0)
		if not self.lazy:
			# read input file in memory and wrap a stream around it to allow overwriting
			file.seek(0)
			tmp = BytesIO(file.read())
			if hasattr(file, 'name'):
				# save reference to input file name
				tmp.name = file.name
			if closeStream:
				file.close()
			file = tmp
		self._tableCache = _tableCache
		self.reader = SFNTReader(file, checkChecksums, fontNumber=fontNumber)
		self.sfntVersion = self.reader.sfntVersion
		self.flavor = self.reader.flavor
		self.flavorData = self.reader.flavorData
	def __enter__(self):
		# Context-manager support: "with TTFont(...) as font: ...";
		# __exit__ closes the underlying reader.
		return self
	def __exit__(self, type, value, traceback):
		self.close()
	def close(self):
		"""If we still have a reader object, close it."""
		if self.reader is not None:
			self.reader.close()
	def save(self, file, reorderTables=True):
		"""Save the font to disk. Similarly to the constructor,
		the 'file' argument can be either a pathname or a writable
		file object.
		"""
		if not hasattr(file, "write"):
			if self.lazy and self.reader.file.name == file:
				raise TTLibError(
					"Can't overwrite TTFont when 'lazy' attribute is True")
			createStream = True
		else:
			# assume "file" is a writable file object
			createStream = False
		# compile into an in-memory buffer first so tables can be
		# reordered before anything reaches the final destination
		tmp = BytesIO()
		writer_reordersTables = self._save(tmp)
		if not (reorderTables is None or writer_reordersTables or
				(reorderTables is False and self.reader is None)):
			if reorderTables is False:
				# sort tables using the original font's order
				tableOrder = list(self.reader.keys())
			else:
				# use the recommended order from the OpenType specification
				tableOrder = None
			tmp.flush()
			tmp2 = BytesIO()
			reorderFontTables(tmp, tmp2, tableOrder)
			tmp.close()
			tmp = tmp2
		if createStream:
			# "file" is a path
			with open(file, "wb") as file:
				file.write(tmp.getvalue())
		else:
			file.write(tmp.getvalue())
		tmp.close()
	def _save(self, file, tableCache=None):
		"""Internal function, to be shared by save() and TTCollection.save()"""
		if self.recalcTimestamp and 'head' in self:
			self['head'] # make sure 'head' is loaded so the recalculation is actually done
		tags = list(self.keys())
		if "GlyphOrder" in tags:
			tags.remove("GlyphOrder")
		numTables = len(tags)
		# write to a temporary stream to allow saving to unseekable streams
		writer = SFNTWriter(file, numTables, self.sfntVersion, self.flavor, self.flavorData)
		done = []
		for tag in tags:
			self._writeTable(tag, writer, done, tableCache)
		writer.close()
		return writer.reordersTables()
	def saveXML(self, fileOrPath, newlinestr=None, **kwargs):
		"""Export the font as TTX (an XML-based text file), or as a series of text
		files when splitTables is true. In the latter case, the 'fileOrPath'
		argument should be a path to a directory.
		The 'tables' argument must either be false (dump all tables) or a
		list of tables to dump. The 'skipTables' argument may be a list of tables
		to skip, but only when the 'tables' argument is false.
		"""
		writer = xmlWriter.XMLWriter(fileOrPath, newlinestr=newlinestr)
		self._saveXML(writer, **kwargs)
		writer.close()
	def _saveXML(self, writer,
			writeVersion=True,
			quiet=None, tables=None, skipTables=None, splitTables=False,
			splitGlyphs=False, disassembleInstructions=True,
			bitmapGlyphDataFormat='raw'):
		# Internal worker behind saveXML(); 'writer' is an xmlWriter.XMLWriter.
		if quiet is not None:
			deprecateArgument("quiet", "configure logging instead")
		self.disassembleInstructions = disassembleInstructions
		self.bitmapGlyphDataFormat = bitmapGlyphDataFormat
		if not tables:
			tables = list(self.keys())
			if "GlyphOrder" not in tables:
				tables = ["GlyphOrder"] + tables
			if skipTables:
				for tag in skipTables:
					if tag in tables:
						tables.remove(tag)
		numTables = len(tables)
		if writeVersion:
			from fontTools import version
			version = ".".join(version.split('.')[:2])
			writer.begintag("ttFont", sfntVersion=repr(tostr(self.sfntVersion))[1:-1],
					ttLibVersion=version)
		else:
			writer.begintag("ttFont", sfntVersion=repr(tostr(self.sfntVersion))[1:-1])
		writer.newline()
		# always splitTables if splitGlyphs is enabled
		splitTables = splitTables or splitGlyphs
		if not splitTables:
			writer.newline()
		else:
			path, ext = os.path.splitext(writer.filename)
			fileNameTemplate = path + ".%s" + ext
		for i in range(numTables):
			tag = tables[i]
			if splitTables:
				tablePath = fileNameTemplate % tagToIdentifier(tag)
				tableWriter = xmlWriter.XMLWriter(tablePath,
						newlinestr=writer.newlinestr)
				# NOTE(review): 'version' is only bound above when
				# writeVersion is True; splitting tables with
				# writeVersion=False would raise NameError here — confirm.
				tableWriter.begintag("ttFont", ttLibVersion=version)
				tableWriter.newline()
				tableWriter.newline()
				writer.simpletag(tagToXML(tag), src=os.path.basename(tablePath))
				writer.newline()
			else:
				tableWriter = writer
			self._tableToXML(tableWriter, tag, splitGlyphs=splitGlyphs)
			if splitTables:
				tableWriter.endtag("ttFont")
				tableWriter.newline()
				tableWriter.close()
		writer.endtag("ttFont")
		writer.newline()
	def _tableToXML(self, writer, tag, quiet=None, splitGlyphs=False):
		# Dump a single table as XML; missing tables are logged and skipped.
		if quiet is not None:
			deprecateArgument("quiet", "configure logging instead")
		if tag in self:
			table = self[tag]
			report = "Dumping '%s' table..." % tag
		else:
			report = "No '%s' table found." % tag
		log.info(report)
		if tag not in self:
			return
		xmlTag = tagToXML(tag)
		attrs = dict()
		if hasattr(table, "ERROR"):
			attrs['ERROR'] = "decompilation error"
		from .tables.DefaultTable import DefaultTable
		if table.__class__ == DefaultTable:
			attrs['raw'] = True
		writer.begintag(xmlTag, **attrs)
		writer.newline()
		if tag == "glyf":
			table.toXML(writer, self, splitGlyphs=splitGlyphs)
		else:
			table.toXML(writer, self)
		writer.endtag(xmlTag)
		writer.newline()
		writer.newline()
	def importXML(self, fileOrPath, quiet=None):
		"""Import a TTX file (an XML-based text format), so as to recreate
		a font object.
		"""
		if quiet is not None:
			deprecateArgument("quiet", "configure logging instead")
		if "maxp" in self and "post" in self:
			# Make sure the glyph order is loaded, as it otherwise gets
			# lost if the XML doesn't contain the glyph order, yet does
			# contain the table which was originally used to extract the
			# glyph names from (ie. 'post', 'cmap' or 'CFF ').
			self.getGlyphOrder()
		from fontTools.misc import xmlReader
		reader = xmlReader.XMLReader(fileOrPath, self)
		reader.read()
	def isLoaded(self, tag):
		"""Return true if the table identified by 'tag' has been
		decompiled and loaded into memory."""
		return tag in self.tables
	def has_key(self, tag):
		"""Return True if the font has a table 'tag', whether loaded or
		still unparsed in the reader; 'GlyphOrder' is always present."""
		if self.isLoaded(tag):
			return True
		elif self.reader and tag in self.reader:
			return True
		elif tag == "GlyphOrder":
			return True
		else:
			return False
	__contains__ = has_key
	def keys(self):
		"""Return all table tags, 'GlyphOrder' first, the remainder
		ordered by sortedTagList()."""
		keys = list(self.tables.keys())
		if self.reader:
			for key in list(self.reader.keys()):
				if key not in keys:
					keys.append(key)
		if "GlyphOrder" in keys:
			keys.remove("GlyphOrder")
		keys = sortedTagList(keys)
		return ["GlyphOrder"] + keys
	def __len__(self):
		return len(list(self.keys()))
	def __getitem__(self, tag):
		# Lazily decompile a table from the reader on first access.
		tag = Tag(tag)
		table = self.tables.get(tag)
		if table is None:
			if tag == "GlyphOrder":
				table = GlyphOrder(tag)
				self.tables[tag] = table
			elif self.reader is not None:
				table = self._readTable(tag)
			else:
				raise KeyError("'%s' table not found" % tag)
		return table
	def _readTable(self, tag):
		# Read 'tag' from the reader and decompile it, consulting the
		# shared table cache when one was supplied.
		log.debug("Reading '%s' table from disk", tag)
		data = self.reader[tag]
		if self._tableCache is not None:
			table = self._tableCache.get((tag, data))
			if table is not None:
				return table
		tableClass = getTableClass(tag)
		table = tableClass(tag)
		self.tables[tag] = table
		log.debug("Decompiling '%s' table", tag)
		try:
			table.decompile(data, self)
		except Exception:
			if not self.ignoreDecompileErrors:
				raise
			# fall back to DefaultTable, retaining the binary table data
			log.exception(
				"An exception occurred during the decompilation of the '%s' table", tag)
			from .tables.DefaultTable import DefaultTable
			file = StringIO()
			traceback.print_exc(file=file)
			table = DefaultTable(tag)
			table.ERROR = file.getvalue()
			self.tables[tag] = table
			table.decompile(data, self)
		if self._tableCache is not None:
			self._tableCache[(tag, data)] = table
		return table
	def __setitem__(self, tag, table):
		self.tables[Tag(tag)] = table
	def __delitem__(self, tag):
		if tag not in self:
			raise KeyError("'%s' table not found" % tag)
		if tag in self.tables:
			del self.tables[tag]
		if self.reader and tag in self.reader:
			del self.reader[tag]
	def get(self, tag, default=None):
		"""Return self[tag], or 'default' when the table is missing."""
		try:
			return self[tag]
		except KeyError:
			return default
	def setGlyphOrder(self, glyphOrder):
		"""Set the glyph order explicitly."""
		self.glyphOrder = glyphOrder
	def getGlyphOrder(self):
		"""Return the list of glyph names in font order, deriving it from
		'CFF ', 'post' or the cmap when not set explicitly."""
		try:
			return self.glyphOrder
		except AttributeError:
			pass
		if 'CFF ' in self:
			cff = self['CFF ']
			self.glyphOrder = cff.getGlyphOrder()
		elif 'post' in self:
			# TrueType font
			glyphOrder = self['post'].getGlyphOrder()
			if glyphOrder is None:
				#
				# No names found in the 'post' table.
				# Try to create glyph names from the unicode cmap (if available)
				# in combination with the Adobe Glyph List (AGL).
				#
				self._getGlyphNamesFromCmap()
			else:
				self.glyphOrder = glyphOrder
		else:
			self._getGlyphNamesFromCmap()
		return self.glyphOrder
	def _getGlyphNamesFromCmap(self):
		#
		# This is rather convoluted, but then again, it's an interesting problem:
		# - we need to use the unicode values found in the cmap table to
		#   build glyph names (eg. because there is only a minimal post table,
		#   or none at all).
		# - but the cmap parser also needs glyph names to work with...
		# So here's what we do:
		# - make up glyph names based on glyphID
		# - load a temporary cmap table based on those names
		# - extract the unicode values, build the "real" glyph names
		# - unload the temporary cmap table
		#
		if self.isLoaded("cmap"):
			# Bootstrapping: we're getting called by the cmap parser
			# itself. This means self.tables['cmap'] contains a partially
			# loaded cmap, making it impossible to get at a unicode
			# subtable here. We remove the partially loaded cmap and
			# restore it later.
			# This only happens if the cmap table is loaded before any
			# other table that does f.getGlyphOrder()  or f.getGlyphName().
			cmapLoading = self.tables['cmap']
			del self.tables['cmap']
		else:
			cmapLoading = None
		# Make up glyph names based on glyphID, which will be used by the
		# temporary cmap and by the real cmap in case we don't find a unicode
		# cmap.
		numGlyphs = int(self['maxp'].numGlyphs)
		glyphOrder = [None] * numGlyphs
		glyphOrder[0] = ".notdef"
		for i in range(1, numGlyphs):
			glyphOrder[i] = "glyph%.5d" % i
		# Set the glyph order, so the cmap parser has something
		# to work with (so we don't get called recursively).
		self.glyphOrder = glyphOrder
		# Make up glyph names based on the reversed cmap table. Because some
		# glyphs (eg. ligatures or alternates) may not be reachable via cmap,
		# this naming table will usually not cover all glyphs in the font.
		# If the font has no Unicode cmap table, reversecmap will be empty.
		if 'cmap' in self:
			reversecmap = self['cmap'].buildReversed()
		else:
			reversecmap = {}
		useCount = {}
		for i in range(numGlyphs):
			tempName = glyphOrder[i]
			if tempName in reversecmap:
				# If a font maps both U+0041 LATIN CAPITAL LETTER A and
				# U+0391 GREEK CAPITAL LETTER ALPHA to the same glyph,
				# we prefer naming the glyph as "A".
				glyphName = self._makeGlyphName(min(reversecmap[tempName]))
				numUses = useCount[glyphName] = useCount.get(glyphName, 0) + 1
				if numUses > 1:
					glyphName = "%s.alt%d" % (glyphName, numUses - 1)
				glyphOrder[i] = glyphName
		if 'cmap' in self:
			# Delete the temporary cmap table from the cache, so it can
			# be parsed again with the right names.
			del self.tables['cmap']
			self.glyphOrder = glyphOrder
			if cmapLoading:
				# restore partially loaded cmap, so it can continue loading
				# using the proper names.
				self.tables['cmap'] = cmapLoading
	@staticmethod
	def _makeGlyphName(codepoint):
		"""Return an AGL name, or a 'uniXXXX'/'uXXXXX' fallback name,
		for the given Unicode codepoint."""
		from fontTools import agl  # Adobe Glyph List
		if codepoint in agl.UV2AGL:
			return agl.UV2AGL[codepoint]
		elif codepoint <= 0xFFFF:
			return "uni%04X" % codepoint
		else:
			return "u%X" % codepoint
	def getGlyphNames(self):
		"""Get a list of glyph names, sorted alphabetically."""
		glyphNames = sorted(self.getGlyphOrder())
		return glyphNames
	def getGlyphNames2(self):
		"""Get a list of glyph names, sorted alphabetically,
		but not case sensitive.
		"""
		from fontTools.misc import textTools
		return textTools.caselessSort(self.getGlyphOrder())
	def getGlyphName(self, glyphID, requireReal=False):
		"""Return the name of the glyph with the given ID; may hand out
		virtual GID names when allowVID is set."""
		try:
			return self.getGlyphOrder()[glyphID]
		except IndexError:
			if requireReal or not self.allowVID:
				# XXX The ??.W8.otf font that ships with OSX uses higher glyphIDs in
				# the cmap table than there are glyphs. I don't think it's legal...
				return "glyph%.5d" % glyphID
			else:
				# user intends virtual GID support
				try:
					glyphName = self.VIDDict[glyphID]
				except KeyError:
					glyphName ="glyph%.5d" % glyphID
					self.last_vid = min(glyphID, self.last_vid )
					self.reverseVIDDict[glyphName] = glyphID
					self.VIDDict[glyphID] = glyphName
				return glyphName
	def getGlyphID(self, glyphName, requireReal=False):
		"""Return the ID of the glyph with the given name; the reverse
		mapping is rebuilt when it is stale."""
		if not hasattr(self, "_reverseGlyphOrderDict"):
			self._buildReverseGlyphOrderDict()
		glyphOrder = self.getGlyphOrder()
		d = self._reverseGlyphOrderDict
		if glyphName not in d:
			if glyphName in glyphOrder:
				self._buildReverseGlyphOrderDict()
				return self.getGlyphID(glyphName)
			else:
				if requireReal:
					raise KeyError(glyphName)
				elif not self.allowVID:
					# Handle glyphXXX only
					if glyphName[:5] == "glyph":
						try:
							return int(glyphName[5:])
						except (NameError, ValueError):
							raise KeyError(glyphName)
				else:
					# user intends virtual GID support
					try:
						glyphID = self.reverseVIDDict[glyphName]
					except KeyError:
						# if name is in glyphXXX format, use the specified name.
						if glyphName[:5] == "glyph":
							try:
								glyphID = int(glyphName[5:])
							except (NameError, ValueError):
								glyphID = None
						if glyphID is None:
							glyphID = self.last_vid -1
							self.last_vid = glyphID
						self.reverseVIDDict[glyphName] = glyphID
						self.VIDDict[glyphID] = glyphName
					return glyphID
		glyphID = d[glyphName]
		if glyphName != glyphOrder[glyphID]:
			self._buildReverseGlyphOrderDict()
			return self.getGlyphID(glyphName)
		return glyphID
	def getReverseGlyphMap(self, rebuild=False):
		"""Return the glyph name -> glyph ID mapping."""
		if rebuild or not hasattr(self, "_reverseGlyphOrderDict"):
			self._buildReverseGlyphOrderDict()
		return self._reverseGlyphOrderDict
	def _buildReverseGlyphOrderDict(self):
		# (Re)build the name -> glyphID mapping from the glyph order.
		self._reverseGlyphOrderDict = d = {}
		glyphOrder = self.getGlyphOrder()
		for glyphID in range(len(glyphOrder)):
			d[glyphOrder[glyphID]] = glyphID
	def _writeTable(self, tag, writer, done, tableCache=None):
		"""Internal helper function for self.save(). Keeps track of
		inter-table dependencies.
		"""
		if tag in done:
			return
		tableClass = getTableClass(tag)
		for masterTable in tableClass.dependencies:
			if masterTable not in done:
				if masterTable in self:
					self._writeTable(masterTable, writer, done, tableCache)
				else:
					done.append(masterTable)
		done.append(tag)
		tabledata = self.getTableData(tag)
		if tableCache is not None:
			entry = tableCache.get((Tag(tag), tabledata))
			if entry is not None:
				log.debug("reusing '%s' table", tag)
				writer.setEntry(tag, entry)
				return
		log.debug("Writing '%s' table to disk", tag)
		writer[tag] = tabledata
		if tableCache is not None:
			tableCache[(Tag(tag), tabledata)] = writer[tag]
	def getTableData(self, tag):
		"""Returns raw table data, whether compiled or directly read from disk.
		"""
		tag = Tag(tag)
		if self.isLoaded(tag):
			log.debug("Compiling '%s' table", tag)
			return self.tables[tag].compile(self)
		elif self.reader and tag in self.reader:
			log.debug("Reading '%s' table from disk", tag)
			return self.reader[tag]
		else:
			raise KeyError(tag)
	def getGlyphSet(self, preferCFF=True):
		"""Return a generic GlyphSet, which is a dict-like object
		mapping glyph names to glyph objects. The returned glyph objects
		have a .draw() method that supports the Pen protocol, and will
		have an attribute named 'width'.
		If the font is CFF-based, the outlines will be taken from the 'CFF ' or
		'CFF2' tables. Otherwise the outlines will be taken from the 'glyf' table.
		If the font contains both a 'CFF '/'CFF2' and a 'glyf' table, you can use
		the 'preferCFF' argument to specify which one should be taken. If the
		font contains both a 'CFF ' and a 'CFF2' table, the latter is taken.
		"""
		glyphs = None
		if (preferCFF and any(tb in self for tb in ["CFF ", "CFF2"]) or
				("glyf" not in self and any(tb in self for tb in ["CFF ", "CFF2"]))):
			table_tag = "CFF2" if "CFF2" in self else "CFF "
			glyphs = _TTGlyphSet(self,
				list(self[table_tag].cff.values())[0].CharStrings, _TTGlyphCFF)
		if glyphs is None and "glyf" in self:
			glyphs = _TTGlyphSet(self, self["glyf"], _TTGlyphGlyf)
		if glyphs is None:
			raise TTLibError("Font contains no outlines")
		return glyphs
	def getBestCmap(self, cmapPreferences=((3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0))):
		"""Return the 'best' unicode cmap dictionary available in the font,
		or None, if no unicode cmap subtable is available.
		By default it will search for the following (platformID, platEncID)
		pairs:
			(3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0)
		This can be customized via the cmapPreferences argument.
		"""
		return self["cmap"].getBestCmap(cmapPreferences=cmapPreferences)
class _TTGlyphSet(object):
"""Generic dict-like GlyphSet class that pulls metrics from hmtx and
glyph shape from TrueType or CFF.
"""
def __init__(self, ttFont, glyphs, glyphType):
"""Construct a new glyphset.
Args:
font (TTFont): The font object (used to get metrics).
glyphs (dict): A dictionary mapping glyph names to ``_TTGlyph`` objects.
glyphType (class): Either ``_TTGlyphCFF`` or ``_TTGlyphGlyf``.
"""
self._glyphs = glyphs
self._hmtx = ttFont['hmtx']
self._vmtx = ttFont['vmtx'] if 'vmtx' in ttFont else None
self._glyphType = glyphType
def keys(self):
return list(self._glyphs.keys())
def has_key(self, glyphName):
return glyphName in self._glyphs
__contains__ = has_key
def __getitem__(self, glyphName):
horizontalMetrics = self._hmtx[glyphName]
verticalMetrics = self._vmtx[glyphName] if self._vmtx else None
return self._glyphType(
self, self._glyphs[glyphName], horizontalMetrics, verticalMetrics)
def __len__(self):
return len(self._glyphs)
def get(self, glyphName, default=None):
try:
return self[glyphName]
except KeyError:
return default
class _TTGlyph(object):
"""Wrapper for a TrueType glyph that supports the Pen protocol, meaning
that it has .draw() and .drawPoints() methods that take a pen object as
their only argument. Additionally there are 'width' and 'lsb' attributes,
read from the 'hmtx' table.
If the font contains a 'vmtx' table, there will also be 'height' and 'tsb'
attributes.
"""
def __init__(self, glyphset, glyph, horizontalMetrics, verticalMetrics=None):
"""Construct a new _TTGlyph.
Args:
glyphset (_TTGlyphSet): A glyphset object used to resolve components.
glyph (ttLib.tables._g_l_y_f.Glyph): The glyph object.
horizontalMetrics (int, int): The glyph's width and left sidebearing.
"""
self._glyphset = glyphset
self._glyph = glyph
self.width, self.lsb = horizontalMetrics
if verticalMetrics:
self.height, self.tsb = verticalMetrics
else:
self.height, self.tsb = None, None
def draw(self, pen):
"""Draw the glyph onto ``pen``. See fontTools.pens.basePen for details
how that works.
"""
self._glyph.draw(pen)
def drawPoints(self, pen):
# drawPoints is only implemented for _TTGlyphGlyf at this time.
raise NotImplementedError()
class _TTGlyphCFF(_TTGlyph):
	# CFF charstrings already draw correctly via the base class's
	# .draw(pen) delegation, so nothing needs to be overridden here.
	pass
class _TTGlyphGlyf(_TTGlyph):

	def draw(self, pen):
		"""Draw the glyph onto Pen. See fontTools.pens.basePen for details
		how that works.
		"""
		self._glyph.draw(pen, self._glyphset._glyphs, self._lsbOffset())

	def drawPoints(self, pen):
		"""Draw the glyph onto PointPen. See fontTools.pens.pointPen
		for details how that works.
		"""
		self._glyph.drawPoints(pen, self._glyphset._glyphs, self._lsbOffset())

	def _lsbOffset(self):
		# Shift the outline so that xMin lands at the hmtx left
		# sidebearing; glyphs without extents need no shift.
		glyph = self._glyph
		if hasattr(glyph, "xMin"):
			return self.lsb - glyph.xMin
		return 0
class GlyphOrder(object):
	"""A pseudo table. The glyph order isn't stored in the font as a real
	sfnt table, but presenting it as one makes it round-trippable through
	the TTX format.
	"""

	def __init__(self, tag=None):
		pass

	def toXML(self, writer, ttFont):
		glyphOrder = ttFont.getGlyphOrder()
		writer.comment("The 'id' attribute is only for humans; "
				"it is ignored when parsed.")
		writer.newline()
		for index, glyphName in enumerate(glyphOrder):
			writer.simpletag("GlyphID", id=index, name=glyphName)
			writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		if not hasattr(self, "glyphOrder"):
			# first element: create the order list and hand it to the font
			self.glyphOrder = []
			ttFont.setGlyphOrder(self.glyphOrder)
		if name == "GlyphID":
			self.glyphOrder.append(attrs["name"])
def getTableModule(tag):
	"""Fetch the packer/unpacker module for a table.
	Return None when no module is found.
	"""
	from . import tables
	pyTag = tagToIdentifier(tag)
	try:
		__import__("fontTools.ttLib.tables." + pyTag)
	except ImportError as err:
		# Distinguish "table not implemented" (the module named after the
		# tag is missing) from a genuinely broken import further down the
		# chain: only swallow the former.
		if pyTag in str(err):
			return None
		raise err
	return getattr(tables, pyTag)
# Registry for custom table packer/unpacker classes. Keys are table
# tags, values are (moduleName, className) tuples.
# See registerCustomTableClass() and getCustomTableClass().
_customTableRegistry = {}
def registerCustomTableClass(tag, moduleName, className=None):
    """Register a custom packer/unpacker class for a table.
    The 'moduleName' must be an importable module. If no 'className'
    is given, it is derived from the tag, for example it will be
    table_C_U_S_T_ for a 'CUST' tag.
    The registered table class should be a subclass of
    fontTools.ttLib.tables.DefaultTable.DefaultTable
    """
    resolvedName = ("table_" + tagToIdentifier(tag)
                    if className is None else className)
    _customTableRegistry[tag] = (moduleName, resolvedName)
def unregisterCustomTableClass(tag):
    """Unregister the custom packer/unpacker class for a table."""
    # Raises KeyError if 'tag' was never registered.
    del _customTableRegistry[tag]
def getCustomTableClass(tag):
    """Return the custom table class for tag, if one has been registered
    with 'registerCustomTableClass()'. Else return None.
    """
    entry = _customTableRegistry.get(tag)
    if entry is None:
        return None
    import importlib
    moduleName, className = entry
    return getattr(importlib.import_module(moduleName), className)
def getTableClass(tag):
    """Fetch the packer/unpacker class for a table."""
    # Custom registrations take precedence over built-in modules.
    customClass = getCustomTableClass(tag)
    if customClass is not None:
        return customClass
    module = getTableModule(tag)
    if module is None:
        # Unknown table: fall back to the opaque default handler.
        from .tables.DefaultTable import DefaultTable
        return DefaultTable
    return getattr(module, "table_" + tagToIdentifier(tag))
def getClassTag(klass):
    """Fetch the table tag for a class object."""
    className = klass.__name__
    assert className.startswith('table_')
    # Drop the 'table_' prefix and decode the escaped identifier.
    return identifierToTag(className[len('table_'):])
def newTable(tag):
    """Return a new instance of a table."""
    return getTableClass(tag)(tag)
def _escapechar(c):
"""Helper function for tagToIdentifier()"""
import re
if re.match("[a-z0-9]", c):
return "_" + c
elif re.match("[A-Z]", c):
return c + "_"
else:
return hex(byteord(c))[2:]
def tagToIdentifier(tag):
    """Convert a table tag to a valid (but UGLY) python identifier,
    as well as a filename that's guaranteed to be unique even on a
    caseless file system. Each character is mapped to two characters.
    Lowercase letters get an underscore before the letter, uppercase
    letters get an underscore after the letter. Trailing spaces are
    trimmed. Illegal characters are escaped as two hex bytes. If the
    result starts with a number (as the result of a hex escape), an
    extra underscore is prepended. Examples:
    'glyf' -> '_g_l_y_f'
    'cvt ' -> '_c_v_t'
    'OS/2' -> 'O_S_2f_2'
    """
    import re
    tag = Tag(tag)
    if tag == "GlyphOrder":
        return tag
    assert len(tag) == 4, "tag should be 4 characters long"
    # Trim trailing spaces, but always keep at least one character.
    while len(tag) > 1 and tag[-1] == ' ':
        tag = tag[:-1]
    ident = "".join(_escapechar(c) for c in tag)
    if re.match("[0-9]", ident):
        # Hex escapes can start with a digit; identifiers can't.
        ident = "_" + ident
    return ident
def identifierToTag(ident):
    """the opposite of tagToIdentifier()"""
    if ident == "GlyphOrder":
        return ident
    # Drop the extra underscore that was prepended when the escaped
    # form started with a digit.
    if len(ident) % 2 and ident[0] == "_":
        ident = ident[1:]
    assert not (len(ident) % 2)
    chars = []
    for pos in range(0, len(ident), 2):
        first, second = ident[pos], ident[pos + 1]
        if first == "_":
            chars.append(second)            # escaped lowercase/digit
        elif second == "_":
            chars.append(first)             # escaped uppercase
        else:
            # two hex digits
            chars.append(chr(int(ident[pos:pos + 2], 16)))
    tag = "".join(chars)
    # pad with trailing spaces back up to 4 characters
    tag += (4 - len(tag)) * ' '
    return Tag(tag)
def tagToXML(tag):
    """Similarly to tagToIdentifier(), this converts a TT tag
    to a valid XML element name. Since XML element names are
    case sensitive, this is a fairly simple/readable translation.
    """
    import re
    tag = Tag(tag)
    if tag == "OS/2":
        return "OS_2"
    if tag == "GlyphOrder":
        return tag
    # Tags that already look like identifiers pass through (minus padding).
    if re.match("[A-Za-z_][A-Za-z_0-9]* *$", tag):
        return tag.strip()
    return tagToIdentifier(tag)
def xmlToTag(tag):
    """The opposite of tagToXML()"""
    if tag == "OS_2":
        return Tag("OS/2")
    # An 8-character name is a full two-chars-per-char escaped identifier.
    if len(tag) == 8:
        return identifierToTag(tag)
    # Otherwise pad the readable name back out to a 4-character tag.
    return Tag(tag + " " * (4 - len(tag)))
# Table order as recommended in the OpenType specification 1.4
TTFTableOrder = ["head", "hhea", "maxp", "OS/2", "hmtx", "LTSH", "VDMX",
                 "hdmx", "cmap", "fpgm", "prep", "cvt ", "loca", "glyf",
                 "kern", "name", "post", "gasp", "PCLT"]

# Recommended order for CFF-flavored (OpenType/CFF) fonts.
OTFTableOrder = ["head", "hhea", "maxp", "OS/2", "name", "cmap", "post",
                 "CFF "]
def sortedTagList(tagList, tableOrder=None):
    """Return a sorted copy of tagList, sorted according to the OpenType
    specification, or according to a custom tableOrder. If given and not
    None, tableOrder needs to be a list of tag names.
    """
    remaining = sorted(tagList)
    if tableOrder is None:
        if "DSIG" in remaining:
            # DSIG should be last (XXX spec reference?)
            remaining.remove("DSIG")
            remaining.append("DSIG")
        tableOrder = OTFTableOrder if "CFF " in remaining else TTFTableOrder
    ordered = []
    for tag in tableOrder:
        if tag in remaining:
            ordered.append(tag)
            remaining.remove(tag)
    # Tags not mentioned in the order list follow, alphabetically.
    ordered.extend(remaining)
    return ordered
def reorderFontTables(inFile, outFile, tableOrder=None, checkChecksums=False):
    """Rewrite a font file, ordering the tables as recommended by the
    OpenType specification 1.4.
    """
    inFile.seek(0)
    outFile.seek(0)
    reader = SFNTReader(inFile, checkChecksums=checkChecksums)
    writer = SFNTWriter(outFile, len(reader.tables), reader.sfntVersion,
                        reader.flavor, reader.flavorData)
    # Copy each table's raw bytes across in the recommended order.
    for tag in sortedTagList(list(reader.keys()), tableOrder):
        writer[tag] = reader[tag]
    writer.close()
def maxPowerOfTwo(x):
    """Return the highest exponent of two, so that
    (2 ** exponent) <= x. Return 0 if x is 0.
    """
    # For x > 0, int.bit_length() gives exactly the count of shifts the
    # old manual loop performed, so exponent == bit_length - 1; the
    # max() clamps the x == 0 case (bit_length 0) back to 0.
    return max(x.bit_length() - 1, 0)
def getSearchRange(n, itemSize=16):
    """Calculate searchRange, entrySelector, rangeShift.
    """
    # itemSize defaults to 16, for backward compatibility
    # with upstream fonttools.
    # Inlined maxPowerOfTwo(): largest exponent with 2 ** exponent <= n
    # (0 when n is 0).
    exponent = max(n.bit_length() - 1, 0)
    searchRange = (2 ** exponent) * itemSize
    rangeShift = max(0, n * itemSize - searchRange)
    return searchRange, exponent, rangeShift
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/ttFont.py",
"copies": "5",
"size": "33434",
"license": "apache-2.0",
"hash": 2426428006813346300,
"line_mean": 30.3052434457,
"line_max": 156,
"alpha_frac": 0.6979123048,
"autogenerated": false,
"ratio": 3.2154260434698982,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6413338348269898,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.psCharStrings import T2CharString
from ufo2fdk.pens import RelativeCoordinatePen, roundInt, roundIntPoint
class T2CharStringPen(RelativeCoordinatePen):
    """Pen that accumulates a Type 2 charstring program from drawing calls,
    rounding all coordinates to integers."""

    def __init__(self, width, glyphSet):
        RelativeCoordinatePen.__init__(self, glyphSet)
        self._heldMove = None
        self._program = []
        if width is not None:
            self._program.append(roundInt(width))

    def _moveTo(self, pt):
        RelativeCoordinatePen._moveTo(self, roundIntPoint(pt))

    def _relativeMoveTo(self, pt):
        # Hold the rmoveto; it is flushed lazily by the next line/curve op.
        x, y = roundIntPoint(pt)
        self._heldMove = [x, y, "rmoveto"]

    def _storeHeldMove(self):
        if self._heldMove is None:
            return
        self._program.extend(self._heldMove)
        self._heldMove = None

    def _lineTo(self, pt):
        RelativeCoordinatePen._lineTo(self, roundIntPoint(pt))

    def _relativeLineTo(self, pt):
        self._storeHeldMove()
        x, y = roundIntPoint(pt)
        self._program += [x, y, "rlineto"]

    def _curveToOne(self, pt1, pt2, pt3):
        RelativeCoordinatePen._curveToOne(
            self, roundIntPoint(pt1), roundIntPoint(pt2), roundIntPoint(pt3))

    def _relativeCurveToOne(self, pt1, pt2, pt3):
        self._storeHeldMove()
        x1, y1 = roundIntPoint(pt1)
        x2, y2 = roundIntPoint(pt2)
        x3, y3 = roundIntPoint(pt3)
        self._program += [x1, y1, x2, y2, x3, y3, "rrcurveto"]

    def _closePath(self):
        pass

    def _endPath(self):
        pass

    def getCharString(self, private=None, globalSubrs=None):
        """Return the accumulated program as a T2CharString, terminated
        with 'endchar'."""
        return T2CharString(program=self._program + ["endchar"],
                            private=private, globalSubrs=globalSubrs)
| {
"repo_name": "adrientetar/ufo2fdk",
"path": "Lib/ufo2fdk/pens/t2CharStringPen.py",
"copies": "5",
"size": "1805",
"license": "mit",
"hash": 3649745401133690000,
"line_mean": 30.1206896552,
"line_max": 107,
"alpha_frac": 0.6210526316,
"autogenerated": false,
"ratio": 3.418560606060606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0037933094384707284,
"num_lines": 58
} |
from fontTools.misc.py23 import basestring
class Color(str):
    """
    This object represents a color. This object is immutable.
    The initial argument can be either a color string as defined in the UFO
    specification or a sequence of (red, green, blue, alpha) components.
    By calling str(colorObject) you will get a UFO compatible color string.
    You can also iterate over the object to create a sequence::
        colorTuple = tuple(colorObject)
    """

    def __new__(self, value):
        # convert from string
        if isinstance(value, basestring):
            value = _stringToSequence(value)
        r, g, b, a = value
        # validate the values
        color = (("r", r), ("g", g), ("b", b), ("a", a))
        for component, v in color:
            if v < 0 or v > 1:
                raise ValueError("The color for %s (%s) is not between 0 and 1." % (component, str(v)))
        # convert back to a normalized string
        r = _stringify(r)
        g = _stringify(g)
        b = _stringify(b)
        a = _stringify(a)
        s = ",".join((r, g, b, a))
        # call the super
        return super(Color, self).__new__(Color, s)

    def __iter__(self):
        value = _stringToSequence(self)
        return iter(value)

    def _get_r(self):
        return _stringToSequence(self)[0]

    # BUGFIX: the docstrings were previously passed as the second
    # *positional* argument of property(), i.e. as the setter (fset).
    # That made `colorObj.r = x` fail with a confusing "'str' object is
    # not callable" and left the properties undocumented. Passing them
    # via the 'doc' keyword keeps the properties read-only and restores
    # the documentation.
    r = property(_get_r, doc="The red component.")

    def _get_g(self):
        return _stringToSequence(self)[1]

    g = property(_get_g, doc="The green component.")

    def _get_b(self):
        return _stringToSequence(self)[2]

    b = property(_get_b, doc="The blue component.")

    def _get_a(self):
        return _stringToSequence(self)[3]

    a = property(_get_a, doc="The alpha component.")
def _stringToSequence(value):
r, g, b, a = [i.strip() for i in value.split(",")]
value = []
for component in (r, g, b, a):
try:
v = int(component)
value.append(v)
continue
except ValueError:
pass
v = float(component)
value.append(v)
return value
def _stringify(v):
"""
>>> _stringify(1)
'1'
>>> _stringify(.1)
'0.1'
>>> _stringify(.01)
'0.01'
>>> _stringify(.001)
'0.001'
>>> _stringify(.0001)
'0.0001'
>>> _stringify(.00001)
'0.00001'
>>> _stringify(.000001)
'0'
>>> _stringify(.000005)
'0.00001'
"""
# it's an int
i = int(v)
if v == i:
return str(i)
# it's a float
else:
# find the shortest possible float
for i in range(1, 6):
s = "%%.%df" % i
s = s % v
if float(s) == v:
break
# see if the result can be converted to an int
f = float(s)
i = int(f)
if f == i:
return str(i)
# otherwise return the float
return s
if __name__ == "__main__":
    # Run the doctests embedded in _stringify() when executed directly.
    import doctest
    doctest.testmod()
| {
"repo_name": "typesupply/defcon",
"path": "Lib/defcon/objects/color.py",
"copies": "2",
"size": "2930",
"license": "mit",
"hash": 5391988265734510000,
"line_mean": 23.6218487395,
"line_max": 103,
"alpha_frac": 0.5225255973,
"autogenerated": false,
"ratio": 3.5995085995085994,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5122034196808599,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import bytechr, byteord, bytesjoin, strjoin
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval, readHex, hexStr, deHexStr
from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
from . import DefaultTable
import itertools
import os
import struct
import logging
log = logging.getLogger(__name__)
# sstruct format for the EBDT table header: a single 16.16 fixed version.
ebdtTableVersionFormat = """
> # big endian
version: 16.16F
"""

# sstruct format for one component of a composite bitmap glyph.
ebdtComponentFormat = """
> # big endian
glyphCode: H
xOffset: b
yOffset: b
"""
class table_E_B_D_T_(DefaultTable.DefaultTable):
    """Embedded Bitmap Data table.

    Holds the actual bitmap images; the offsets/index that locate each
    glyph's data live in a companion locator table (see 'locatorName').
    """

    # Keep a reference to the name of the data locator table.
    locatorName = 'EBLC'

    # This method can be overridden in subclasses to support new formats
    # without changing the other implementation. Also can be used as a
    # convenience method for coverting a font file to an alternative format.
    def getImageFormatClass(self, imageFormat):
        # Map an image format number to its BitmapGlyph subclass.
        return ebdt_bitmap_classes[imageFormat]

    def decompile(self, data, ttFont):
        """Parse bitmap glyph data using the locations recorded in the
        locator (EBLC) table; fills self.strikeData with one
        {glyphName: BitmapGlyph} dict per strike."""
        # Get the version but don't advance the slice.
        # Most of the lookup for this table is done relative
        # to the begining so slice by the offsets provided
        # in the EBLC table.
        sstruct.unpack2(ebdtTableVersionFormat, data, self)
        # Keep a dict of glyphs that have been seen so they aren't remade.
        # This dict maps intervals of data to the BitmapGlyph.
        glyphDict = {}
        # Pull out the EBLC table and loop through glyphs.
        # A strike is a concept that spans both tables.
        # The actual bitmap data is stored in the EBDT.
        locator = ttFont[self.__class__.locatorName]
        self.strikeData = []
        for curStrike in locator.strikes:
            bitmapGlyphDict = {}
            self.strikeData.append(bitmapGlyphDict)
            for indexSubTable in curStrike.indexSubTables:
                dataIter = zip(indexSubTable.names, indexSubTable.locations)
                for curName, curLoc in dataIter:
                    # Don't create duplicate data entries for the same glyphs.
                    # Instead just use the structures that already exist if they exist.
                    if curLoc in glyphDict:
                        curGlyph = glyphDict[curLoc]
                    else:
                        curGlyphData = data[slice(*curLoc)]
                        imageFormatClass = self.getImageFormatClass(indexSubTable.imageFormat)
                        curGlyph = imageFormatClass(curGlyphData, ttFont)
                        glyphDict[curLoc] = curGlyph
                    bitmapGlyphDict[curName] = curGlyph

    def compile(self, ttFont):
        """Serialize the bitmap data, recording each glyph's (start, end)
        byte interval back into the locator table's index subtables."""
        dataList = []
        dataList.append(sstruct.pack(ebdtTableVersionFormat, self))
        dataSize = len(dataList[0])
        # Keep a dict of glyphs that have been seen so they aren't remade.
        # This dict maps the id of the BitmapGlyph to the interval
        # in the data.
        glyphDict = {}
        # Go through the bitmap glyph data. Just in case the data for a glyph
        # changed the size metrics should be recalculated. There are a variety
        # of formats and they get stored in the EBLC table. That is why
        # recalculation is defered to the EblcIndexSubTable class and just
        # pass what is known about bitmap glyphs from this particular table.
        locator = ttFont[self.__class__.locatorName]
        for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
            for curIndexSubTable in curStrike.indexSubTables:
                dataLocations = []
                for curName in curIndexSubTable.names:
                    # Handle the data placement based on seeing the glyph or not.
                    # Just save a reference to the location if the glyph has already
                    # been saved in compile. This code assumes that glyphs will only
                    # be referenced multiple times from indexFormat5. By luck the
                    # code may still work when referencing poorly ordered fonts with
                    # duplicate references. If there is a font that is unlucky the
                    # respective compile methods for the indexSubTables will fail
                    # their assertions. All fonts seem to follow this assumption.
                    # More complicated packing may be needed if a counter-font exists.
                    glyph = curGlyphDict[curName]
                    objectId = id(glyph)
                    if objectId not in glyphDict:
                        data = glyph.compile(ttFont)
                        data = curIndexSubTable.padBitmapData(data)
                        startByte = dataSize
                        dataSize += len(data)
                        endByte = dataSize
                        dataList.append(data)
                        dataLoc = (startByte, endByte)
                        glyphDict[objectId] = dataLoc
                    else:
                        dataLoc = glyphDict[objectId]
                    dataLocations.append(dataLoc)
                # Just use the new data locations in the indexSubTable.
                # The respective compile implementations will take care
                # of any of the problems in the convertion that may arise.
                curIndexSubTable.locations = dataLocations
        return bytesjoin(dataList)

    def toXML(self, writer, ttFont):
        """Write one 'strikedata' element per strike; the per-glyph output
        format is chosen by ttFont.bitmapGlyphDataFormat."""
        # When exporting to XML if one of the data export formats
        # requires metrics then those metrics may be in the locator.
        # In this case populate the bitmaps with "export metrics".
        if ttFont.bitmapGlyphDataFormat in ('row', 'bitwise'):
            locator = ttFont[self.__class__.locatorName]
            for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
                for curIndexSubTable in curStrike.indexSubTables:
                    for curName in curIndexSubTable.names:
                        glyph = curGlyphDict[curName]
                        # I'm not sure which metrics have priority here.
                        # For now if both metrics exist go with glyph metrics.
                        if hasattr(glyph, 'metrics'):
                            glyph.exportMetrics = glyph.metrics
                        else:
                            glyph.exportMetrics = curIndexSubTable.metrics
                        glyph.exportBitDepth = curStrike.bitmapSizeTable.bitDepth
        writer.simpletag("header", [('version', self.version)])
        writer.newline()
        locator = ttFont[self.__class__.locatorName]
        for strikeIndex, bitmapGlyphDict in enumerate(self.strikeData):
            writer.begintag('strikedata', [('index', strikeIndex)])
            writer.newline()
            for curName, curBitmap in bitmapGlyphDict.items():
                curBitmap.toXML(strikeIndex, curName, writer, ttFont)
            writer.endtag('strikedata')
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild self.version and self.strikeData from TTX elements."""
        if name == 'header':
            self.version = safeEval(attrs['version'])
        elif name == 'strikedata':
            if not hasattr(self, 'strikeData'):
                self.strikeData = []
            strikeIndex = safeEval(attrs['index'])

            bitmapGlyphDict = {}
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                # Compare ignoring the first 4 characters of the prefix —
                # presumably so subclasses with a different 4-letter tag
                # prefix also match; TODO confirm against the subclass
                # naming scheme.
                if name[4:].startswith(_bitmapGlyphSubclassPrefix[4:]):
                    imageFormat = safeEval(name[len(_bitmapGlyphSubclassPrefix):])
                    glyphName = attrs['name']
                    imageFormatClass = self.getImageFormatClass(imageFormat)
                    curGlyph = imageFormatClass(None, None)
                    curGlyph.fromXML(name, attrs, content, ttFont)
                    assert glyphName not in bitmapGlyphDict, "Duplicate glyphs with the same name '%s' in the same strike." % glyphName
                    bitmapGlyphDict[glyphName] = curGlyph
                else:
                    log.warning("%s being ignored by %s", name, self.__class__.__name__)

            # Grow the strike data array to the appropriate size. The XML
            # format allows the strike index value to be out of order.
            if strikeIndex >= len(self.strikeData):
                self.strikeData += [None] * (strikeIndex + 1 - len(self.strikeData))
            assert self.strikeData[strikeIndex] is None, "Duplicate strike EBDT indices."
            self.strikeData[strikeIndex] = bitmapGlyphDict
class EbdtComponent(object):
    """One component reference of a composite bitmap glyph, serialized
    to/from XML using the field names of ebdtComponentFormat."""

    def toXML(self, writer, ttFont):
        writer.begintag('ebdtComponent', [('name', self.name)])
        writer.newline()
        for fieldName in sstruct.getformat(ebdtComponentFormat)[1][1:]:
            writer.simpletag(fieldName, value=getattr(self, fieldName))
            writer.newline()
        writer.endtag('ebdtComponent')
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.name = attrs['name']
        knownFields = set(sstruct.getformat(ebdtComponentFormat)[1][1:])
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name in knownFields:
                setattr(self, name, safeEval(attrs['value']))
            else:
                log.warning("unknown name '%s' being ignored by EbdtComponent.", name)
# Helper functions for dealing with binary.
def _data2binary(data, numBits):
    """Expand 'data' into a string of '0'/'1' characters, least
    significant bit first within each byte, emitting numBits in total."""
    bits = []
    for curByte in data:
        value = byteord(curByte)
        take = min(8, numBits)
        for _ in range(take):
            bits.append('1' if value & 0x1 else '0')
            value >>= 1
        numBits -= take
    return strjoin(bits)
def _binary2data(binary):
    """Pack a string of '0'/'1' characters (LSB first per byte, as
    produced by _data2binary) back into bytes."""
    packed = []
    for start in range(0, len(binary), 8):
        value = 0
        # Walk the 8-bit chunk backwards so the first character ends up
        # as the least significant bit.
        for bit in reversed(binary[start:start + 8]):
            value <<= 1
            if bit == '1':
                value |= 1
        packed.append(bytechr(value))
    return bytesjoin(packed)
def _memoize(f):
class memodict(dict):
def __missing__(self, key):
ret = f(key)
if len(key) == 1:
self[key] = ret
return ret
return memodict().__getitem__
# 00100111 -> 11100100 per byte, not to be confused with little/big endian.
# Bitmap data per byte is in the order that binary is written on the page
# with the least significant bit as far right as possible. This is the
# opposite of what makes sense algorithmically and hence this function.
@_memoize
def _reverseBytes(data):
    """Reverse the bit order within every byte of 'data' (memoized per
    single byte)."""
    if len(data) != 1:
        # Recurse per element; the single-byte results are cached.
        return bytesjoin(map(_reverseBytes, data))
    value = byteord(data)
    flipped = 0
    for _ in range(8):
        flipped = (flipped << 1) | (value & 1)
        value >>= 1
    return bytechr(flipped)
# This section of code is for reading and writing image data to/from XML.
def _writeRawImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    # Dump the glyph's image data as an uninterpreted hex blob.
    writer.begintag('rawimagedata')
    writer.newline()
    writer.dumphex(bitmapObject.imageData)
    writer.endtag('rawimagedata')
    writer.newline()
def _readRawImageData(bitmapObject, name, attrs, content, ttFont):
    # Inverse of _writeRawImageData: parse the hex blob back into bytes.
    bitmapObject.imageData = readHex(content)
def _writeRowImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    """Write the bitmap as one hex-encoded <row> element per scanline."""
    # Consume the one-shot export attributes set up by the table's toXML.
    metrics = bitmapObject.exportMetrics
    del bitmapObject.exportMetrics
    bitDepth = bitmapObject.exportBitDepth
    del bitmapObject.exportBitDepth

    writer.begintag('rowimagedata', bitDepth=bitDepth,
                    width=metrics.width, height=metrics.height)
    writer.newline()
    for rowIndex in range(metrics.height):
        rowBytes = bitmapObject.getRow(rowIndex, bitDepth=bitDepth, metrics=metrics)
        writer.simpletag('row', value=hexStr(rowBytes))
        writer.newline()
    writer.endtag('rowimagedata')
    writer.newline()
def _readRowImageData(bitmapObject, name, attrs, content, ttFont):
    """Rebuild the bitmap from hex-encoded <row> elements."""
    bitDepth = safeEval(attrs['bitDepth'])
    metrics = SmallGlyphMetrics()
    metrics.width = safeEval(attrs['width'])
    metrics.height = safeEval(attrs['height'])
    dataRows = []
    for element in content:
        if not isinstance(element, tuple):
            continue
        elemName, elemAttrs, _elemContent = element
        if elemName == 'row':
            dataRows.append(deHexStr(elemAttrs['value']))
    bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics)
def _writeBitwiseImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    """Write the bitmap as ASCII art: '.' for 0 bits, '@' for 1 bits."""
    # Consume the one-shot export attributes set up by the table's toXML.
    metrics = bitmapObject.exportMetrics
    del bitmapObject.exportMetrics
    bitDepth = bitmapObject.exportBitDepth
    del bitmapObject.exportBitDepth

    # A dict for mapping binary to more readable/artistic ASCII characters.
    asciiFor = {'0': '.', '1': '@'}

    writer.begintag('bitwiseimagedata', bitDepth=bitDepth,
                    width=metrics.width, height=metrics.height)
    writer.newline()
    for rowIndex in range(metrics.height):
        rowBytes = bitmapObject.getRow(rowIndex, bitDepth=1, metrics=metrics,
                                       reverseBytes=True)
        bitString = _data2binary(rowBytes, metrics.width)
        # Make the output a readable ASCII art form.
        writer.simpletag('row', value=strjoin(map(asciiFor.get, bitString)))
        writer.newline()
    writer.endtag('bitwiseimagedata')
    writer.newline()
def _readBitwiseImageData(bitmapObject, name, attrs, content, ttFont):
    """Rebuild the bitmap from its ASCII-art representation."""
    bitDepth = safeEval(attrs['bitDepth'])
    metrics = SmallGlyphMetrics()
    metrics.width = safeEval(attrs['width'])
    metrics.height = safeEval(attrs['height'])

    # A dict for mapping from ASCII to binary. All characters are considered
    # a '1' except space, period and '0' which maps to '0'.
    binaryConv = {' ': '0', '.': '0', '0': '0'}

    dataRows = []
    for element in content:
        if not isinstance(element, tuple):
            continue
        elemName, elemAttrs, _elemContent = element
        if elemName != 'row':
            continue
        mapParams = zip(elemAttrs['value'], itertools.repeat('1'))
        bitString = strjoin(itertools.starmap(binaryConv.get, mapParams))
        dataRows.append(_binary2data(bitString))

    bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics,
                         reverseBytes=True)
def _writeExtFileImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    # Write the image bytes to an external file under
    # <output dir>/bitmaps/strike<N>/<glyphName><ext> and record only the
    # path in the XML.
    try:
        folder = os.path.dirname(writer.file.name)
    except AttributeError:
        # fall back to current directory if output file's directory isn't found
        folder = '.'
    folder = os.path.join(folder, 'bitmaps')
    filename = glyphName + bitmapObject.fileExtension
    if not os.path.isdir(folder):
        os.makedirs(folder)
    folder = os.path.join(folder, 'strike%d' % strikeIndex)
    if not os.path.isdir(folder):
        os.makedirs(folder)

    fullPath = os.path.join(folder, filename)
    writer.simpletag('extfileimagedata', value=fullPath)
    writer.newline()

    with open(fullPath, "wb") as file:
        file.write(bitmapObject.imageData)
def _readExtFileImageData(bitmapObject, name, attrs, content, ttFont):
    # Load the image bytes back from the external file recorded in XML.
    fullPath = attrs['value']
    with open(fullPath, "rb") as file:
        bitmapObject.imageData = file.read()
# End of XML writing code.
# Important information about the naming scheme. Used for identifying formats
# in XML.
# Subclasses below are named '<prefix><formatNumber>'; getFormat() parses
# the number back out of the class name.
_bitmapGlyphSubclassPrefix = 'ebdt_bitmap_format_'
class BitmapGlyph(object):
    """Base class for all EBDT image formats.

    Holds the raw 'data' and decompiles lazily: the first access to a
    missing attribute triggers decompile() via __getattr__.
    """

    # For the external file format. This can be changed in subclasses. This way
    # when the extfile option is turned on files have the form: glyphName.ext
    # The default is just a flat binary file with no meaning.
    fileExtension = '.bin'

    # Keep track of reading and writing of various forms.
    # Maps a bitmapGlyphDataFormat option name to (writeFunc, readFunc).
    xmlDataFunctions = {
        'raw': (_writeRawImageData, _readRawImageData),
        'row': (_writeRowImageData, _readRowImageData),
        'bitwise': (_writeBitwiseImageData, _readBitwiseImageData),
        'extfile': (_writeExtFileImageData, _readExtFileImageData),
        }

    def __init__(self, data, ttFont):
        self.data = data
        self.ttFont = ttFont
        # TODO Currently non-lazy decompilation is untested here...
        #if not ttFont.lazy:
        #	self.decompile()
        #	del self.data

    def __getattr__(self, attr):
        # Allow lazy decompile.
        if attr[:2] == '__':
            raise AttributeError(attr)
        if not hasattr(self, "data"):
            # Already decompiled ('data' was deleted below), so the
            # attribute genuinely doesn't exist.
            raise AttributeError(attr)
        self.decompile()
        del self.data
        return getattr(self, attr)

    # Not a fan of this but it is needed for safer safety checking.
    def getFormat(self):
        # Derive the numeric image format from the subclass name.
        return safeEval(self.__class__.__name__[len(_bitmapGlyphSubclassPrefix):])

    def toXML(self, strikeIndex, glyphName, writer, ttFont):
        writer.begintag(self.__class__.__name__, [('name', glyphName)])
        writer.newline()

        self.writeMetrics(writer, ttFont)
        # Use the internal write method to write using the correct output format.
        self.writeData(strikeIndex, glyphName, writer, ttFont)

        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.readMetrics(name, attrs, content, ttFont)
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attr, content = element
            if not name.endswith('imagedata'):
                continue
            # Chop off 'imagedata' from the tag to get just the option.
            option = name[:-len('imagedata')]
            assert option in self.__class__.xmlDataFunctions
            self.readData(name, attr, content, ttFont)

    # Some of the glyphs have the metrics. This allows for metrics to be
    # added if the glyph format has them. Default behavior is to do nothing.
    def writeMetrics(self, writer, ttFont):
        pass

    # The opposite of write metrics.
    def readMetrics(self, name, attrs, content, ttFont):
        pass

    def writeData(self, strikeIndex, glyphName, writer, ttFont):
        try:
            writeFunc, readFunc = self.__class__.xmlDataFunctions[ttFont.bitmapGlyphDataFormat]
        except KeyError:
            # Unknown output format requested: fall back to raw hex.
            writeFunc = _writeRawImageData
        writeFunc(strikeIndex, glyphName, self, writer, ttFont)

    def readData(self, name, attrs, content, ttFont):
        # Chop off 'imagedata' from the tag to get just the option.
        option = name[:-len('imagedata')]
        writeFunc, readFunc = self.__class__.xmlDataFunctions[option]
        readFunc(self, name, attrs, content, ttFont)
# A closure for creating a mixin for the two types of metrics handling.
# Most of the code is very similar so its easier to deal with here.
# Everything works just by passing the class that the mixin is for.
def _createBitmapPlusMetricsMixin(metricsClass):
    """Build a mixin that reads/writes 'metricsClass' metrics in XML.

    Big and small metrics are handled identically, so the mixin is
    generated from the metrics class instead of being written twice.
    """
    # Both metrics names are listed here to make meaningful error messages.
    knownNames = [BigGlyphMetrics.__name__, SmallGlyphMetrics.__name__]
    curMetricsName = metricsClass.__name__
    # Find which metrics this is for and determine the opposite name.
    oppositeMetricsName = knownNames[1 - knownNames.index(curMetricsName)]

    class BitmapPlusMetricsMixin(object):

        def writeMetrics(self, writer, ttFont):
            self.metrics.toXML(writer, ttFont)

        def readMetrics(self, name, attrs, content, ttFont):
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == curMetricsName:
                    self.metrics = metricsClass()
                    self.metrics.fromXML(name, attrs, content, ttFont)
                elif name == oppositeMetricsName:
                    log.warning("Warning: %s being ignored in format %d.", oppositeMetricsName, self.getFormat())

    return BitmapPlusMetricsMixin
# Since there are only two types of mixin's just create them here.
# Big metrics are used by formats 6/7 below, small metrics by formats 1/2.
BitmapPlusBigMetricsMixin = _createBitmapPlusMetricsMixin(BigGlyphMetrics)
BitmapPlusSmallMetricsMixin = _createBitmapPlusMetricsMixin(SmallGlyphMetrics)
# Data that is bit aligned can be tricky to deal with. These classes implement
# helper functionality for dealing with the data and getting a particular row
# of bitwise data. Also helps implement fancy data export/import in XML.
class BitAlignedBitmapMixin(object):
    """Row access for bitmap data where rows are packed back-to-back at
    the bit level (no per-row byte padding)."""

    def _getBitRange(self, row, bitDepth, metrics):
        # Return the (startBit, endBit) interval occupied by 'row'.
        rowBits = (bitDepth * metrics.width)
        bitOffset = row * rowBits
        return (bitOffset, bitOffset+rowBits)

    def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
        """Extract one row as bytes, padding the final byte with zeros."""
        if metrics is None:
            metrics = self.metrics
        assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"

        # Loop through each byte. This can cover two bytes in the original data or
        # a single byte if things happen to be aligned. The very last entry might
        # not be aligned so take care to trim the binary data to size and pad with
        # zeros in the row data. Bit aligned data is somewhat tricky.
        #
        # Example of data cut. Data cut represented in x's.
        # '|' represents byte boundary.
        # data = ...0XX|XXXXXX00|000... => XXXXXXXX
        #		or
        # data = ...0XX|XXXX0000|000... => XXXXXX00
        #   or
        # data = ...000|XXXXXXXX|000... => XXXXXXXX
        #   or
        # data = ...000|00XXXX00|000... => XXXX0000
        #
        dataList = []
        bitRange = self._getBitRange(row, bitDepth, metrics)
        stepRange = bitRange + (8,)
        for curBit in range(*stepRange):
            endBit = min(curBit+8, bitRange[1])
            numBits = endBit - curBit
            cutPoint = curBit % 8
            firstByteLoc = curBit // 8
            secondByteLoc = endBit // 8
            if firstByteLoc < secondByteLoc:
                numBitsCut = 8 - cutPoint
            else:
                numBitsCut = endBit - curBit
            curByte = _reverseBytes(self.imageData[firstByteLoc])
            firstHalf = byteord(curByte) >> cutPoint
            firstHalf = ((1<<numBitsCut)-1) & firstHalf
            newByte = firstHalf
            if firstByteLoc < secondByteLoc and secondByteLoc < len(self.imageData):
                curByte = _reverseBytes(self.imageData[secondByteLoc])
                secondHalf = byteord(curByte) << numBitsCut
                newByte = (firstHalf | secondHalf) & ((1<<numBits)-1)
            dataList.append(bytechr(newByte))

        # The way the data is kept is opposite the algorithm used.
        data = bytesjoin(dataList)
        if not reverseBytes:
            data = _reverseBytes(data)
        return data

    def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
        """Pack the per-row byte strings into bit-aligned image data."""
        if metrics is None:
            metrics = self.metrics
        if not reverseBytes:
            dataRows = list(map(_reverseBytes, dataRows))

        # Keep track of a list of ordinal values as they are easier to modify
        # than a list of strings. Map to actual strings later.
        numBytes = (self._getBitRange(len(dataRows), bitDepth, metrics)[0] + 7) // 8
        ordDataList = [0] * numBytes
        for row, data in enumerate(dataRows):
            bitRange = self._getBitRange(row, bitDepth, metrics)
            stepRange = bitRange + (8,)
            for curBit, curByte in zip(range(*stepRange), data):
                endBit = min(curBit+8, bitRange[1])
                cutPoint = curBit % 8
                firstByteLoc = curBit // 8
                secondByteLoc = endBit // 8
                if firstByteLoc < secondByteLoc:
                    numBitsCut = 8 - cutPoint
                else:
                    numBitsCut = endBit - curBit
                curByte = byteord(curByte)
                firstByte = curByte & ((1<<numBitsCut)-1)
                ordDataList[firstByteLoc] |= (firstByte << cutPoint)
                if firstByteLoc < secondByteLoc and secondByteLoc < numBytes:
                    secondByte = (curByte >> numBitsCut) & ((1<<8-numBitsCut)-1)
                    ordDataList[secondByteLoc] |= secondByte

        # Save the image data with the bits going the correct way.
        self.imageData = _reverseBytes(bytesjoin(map(bytechr, ordDataList)))
class ByteAlignedBitmapMixin(object):
    """Row access for bitmap data where every row starts on a byte
    boundary (rows padded to whole bytes)."""

    def _getByteRange(self, row, bitDepth, metrics):
        # Each row occupies ceil(bitDepth * width / 8) bytes.
        rowBytes = (bitDepth * metrics.width + 7) // 8
        start = row * rowBytes
        return (start, start + rowBytes)

    def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
        """Return the raw bytes of one row."""
        if metrics is None:
            metrics = self.metrics
        assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"
        start, stop = self._getByteRange(row, bitDepth, metrics)
        rowData = self.imageData[start:stop]
        return _reverseBytes(rowData) if reverseBytes else rowData

    def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
        """Store the given per-row byte strings as the image data."""
        if metrics is None:
            metrics = self.metrics
        if reverseBytes:
            dataRows = map(_reverseBytes, dataRows)
        self.imageData = bytesjoin(dataRows)
class ebdt_bitmap_format_1(ByteAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph):
    # Format 1: small glyph metrics followed by byte-aligned image data.

    def decompile(self):
        self.metrics = SmallGlyphMetrics()
        dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
        self.imageData = data

    def compile(self, ttFont):
        data = sstruct.pack(smallGlyphMetricsFormat, self.metrics)
        return data + self.imageData
class ebdt_bitmap_format_2(BitAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph):
	"""EBDT format 2: small glyph metrics followed by bit-aligned image data."""

	def decompile(self):
		# Identical layout to format 1, but the bitmap rows are bit-packed.
		metrics = SmallGlyphMetrics()
		dummy, rest = sstruct.unpack2(smallGlyphMetricsFormat, self.data, metrics)
		self.metrics = metrics
		self.imageData = rest

	def compile(self, ttFont):
		header = sstruct.pack(smallGlyphMetricsFormat, self.metrics)
		return header + self.imageData
class ebdt_bitmap_format_5(BitAlignedBitmapMixin, BitmapGlyph):
	"""EBDT format 5: bit-aligned image data only (metrics come from EBLC)."""

	def decompile(self):
		# No per-glyph header: the record is nothing but bitmap bits.
		self.imageData = self.data

	def compile(self, ttFont):
		return self.imageData
class ebdt_bitmap_format_6(ByteAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph):
	"""EBDT format 6: big glyph metrics followed by byte-aligned image data."""

	def decompile(self):
		metrics = BigGlyphMetrics()
		dummy, rest = sstruct.unpack2(bigGlyphMetricsFormat, self.data, metrics)
		self.metrics = metrics
		self.imageData = rest

	def compile(self, ttFont):
		header = sstruct.pack(bigGlyphMetricsFormat, self.metrics)
		return header + self.imageData
class ebdt_bitmap_format_7(BitAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph):
	"""EBDT format 7: big glyph metrics followed by bit-aligned image data."""

	def decompile(self):
		metrics = BigGlyphMetrics()
		dummy, rest = sstruct.unpack2(bigGlyphMetricsFormat, self.data, metrics)
		self.metrics = metrics
		self.imageData = rest

	def compile(self, ttFont):
		header = sstruct.pack(bigGlyphMetricsFormat, self.metrics)
		return header + self.imageData
class ComponentBitmapGlyph(BitmapGlyph):
	"""Base class for composite bitmap glyphs built from EbdtComponent entries."""

	def toXML(self, strikeIndex, glyphName, writer, ttFont):
		tag = self.__class__.__name__
		writer.begintag(tag, [('name', glyphName)])
		writer.newline()
		self.writeMetrics(writer, ttFont)
		# The component list gets its own wrapper element.
		writer.begintag('components')
		writer.newline()
		for component in self.componentArray:
			component.toXML(writer, ttFont)
		writer.endtag('components')
		writer.newline()
		writer.endtag(tag)
		writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		self.readMetrics(name, attrs, content, ttFont)
		for element in content:
			if not isinstance(element, tuple):
				continue
			elemName, elemAttrs, elemContent = element
			if elemName != 'components':
				continue
			self.componentArray = []
			for compElement in elemContent:
				if not isinstance(compElement, tuple):
					continue
				compName, compAttrs, compContent = compElement
				if compName == 'ebdtComponent':
					component = EbdtComponent()
					component.fromXML(compName, compAttrs, compContent, ttFont)
					self.componentArray.append(component)
				else:
					log.warning("'%s' being ignored in component array.", compName)
class ebdt_bitmap_format_8(BitmapPlusSmallMetricsMixin, ComponentBitmapGlyph):
	"""EBDT format 8: small metrics plus a list of components, no bitmap data."""

	def decompile(self):
		self.metrics = SmallGlyphMetrics()
		dummy, rest = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
		# A single pad byte follows the small metrics in this format.
		rest = rest[1:]
		(numComponents,) = struct.unpack(">H", rest[:2])
		rest = rest[2:]
		components = []
		for _ in range(numComponents):
			component = EbdtComponent()
			dummy, rest = sstruct.unpack2(ebdtComponentFormat, rest, component)
			component.name = self.ttFont.getGlyphName(component.glyphCode)
			components.append(component)
		self.componentArray = components

	def compile(self, ttFont):
		parts = [sstruct.pack(smallGlyphMetricsFormat, self.metrics)]
		# Pad byte, then the component count, then each component record.
		parts.append(b'\0')
		parts.append(struct.pack(">H", len(self.componentArray)))
		for component in self.componentArray:
			component.glyphCode = ttFont.getGlyphID(component.name)
			parts.append(sstruct.pack(ebdtComponentFormat, component))
		return bytesjoin(parts)
class ebdt_bitmap_format_9(BitmapPlusBigMetricsMixin, ComponentBitmapGlyph):
	"""EBDT format 9: big metrics plus a list of components, no bitmap data."""

	def decompile(self):
		self.metrics = BigGlyphMetrics()
		dummy, rest = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
		# Unlike format 8, no pad byte: the component count follows directly.
		(numComponents,) = struct.unpack(">H", rest[:2])
		rest = rest[2:]
		components = []
		for _ in range(numComponents):
			component = EbdtComponent()
			dummy, rest = sstruct.unpack2(ebdtComponentFormat, rest, component)
			component.name = self.ttFont.getGlyphName(component.glyphCode)
			components.append(component)
		self.componentArray = components

	def compile(self, ttFont):
		parts = [sstruct.pack(bigGlyphMetricsFormat, self.metrics)]
		parts.append(struct.pack(">H", len(self.componentArray)))
		for component in self.componentArray:
			component.glyphCode = ttFont.getGlyphID(component.name)
			parts.append(sstruct.pack(ebdtComponentFormat, component))
		return bytesjoin(parts)
# Dictionary mapping EBDT bitmap format numbers to the class implementing
# that format.  Only the formats listed here are currently supported;
# unknown formats fall back to raw-data handling elsewhere.
ebdt_bitmap_classes = {
		1: ebdt_bitmap_format_1,
		2: ebdt_bitmap_format_2,
		5: ebdt_bitmap_format_5,
		6: ebdt_bitmap_format_6,
		7: ebdt_bitmap_format_7,
		8: ebdt_bitmap_format_8,
		9: ebdt_bitmap_format_9,
	}
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/E_B_D_T_.py",
"copies": "5",
"size": "27208",
"license": "apache-2.0",
"hash": 5467771397501304000,
"line_mean": 34.7060367454,
"line_max": 120,
"alpha_frac": 0.7298221111,
"autogenerated": false,
"ratio": 3.351977331526426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6581799442626426,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import bytechr, byteord, bytesjoin, tobytes, tostr
from fontTools.misc import eexec
from .psOperators import (
PSOperators,
ps_StandardEncoding,
ps_array,
ps_boolean,
ps_dict,
ps_integer,
ps_literal,
ps_mark,
ps_name,
ps_operator,
ps_procedure,
ps_procmark,
ps_real,
ps_string,
)
import re
from collections.abc import Callable
from string import whitespace
import logging
log = logging.getLogger(__name__)
ps_special = b'()<>[]{}%' # / is one too, but we take care of that one differently
skipwhiteRE = re.compile(bytesjoin([b"[", whitespace, b"]*"]))
endofthingPat = bytesjoin([b"[^][(){}<>/%", whitespace, b"]*"])
endofthingRE = re.compile(endofthingPat)
commentRE = re.compile(b"%[^\n\r]*")
# XXX This not entirely correct as it doesn't allow *nested* embedded parens:
stringPat = br"""
\(
(
(
[^()]* \ [()]
)
|
(
[^()]* \( [^()]* \)
)
)*
[^()]*
\)
"""
stringPat = b"".join(stringPat.split())
stringRE = re.compile(stringPat)
hexstringRE = re.compile(bytesjoin([b"<[", whitespace, b"0-9A-Fa-f]*>"]))
class PSTokenError(Exception): pass  # malformed or unexpected PostScript token
class PSError(Exception): pass  # generic error during PostScript interpretation
class PSTokenizer(object):
	"""Split a PostScript byte stream into (tokentype, token) pairs.

	The buffer is kept as bytes; tokens are decoded to str with *encoding*
	just before being returned.  Also offers a minimal file-like interface
	(read/close) and eexec decryption support for Type1 fonts.
	"""
	def __init__(self, buf=b'', encoding="ascii"):
		# Force self.buf to be a byte string
		buf = tobytes(buf)
		self.buf = buf
		self.len = len(buf)
		self.pos = 0
		self.closed = False
		self.encoding = encoding
	def read(self, n=-1):
		"""Read at most 'n' bytes from the buffer, or less if the read
		hits EOF before obtaining 'n' bytes.
		If 'n' is negative or omitted, read all data until EOF is reached.
		"""
		if self.closed:
			raise ValueError("I/O operation on closed file")
		if n is None or n < 0:
			newpos = self.len
		else:
			newpos = min(self.pos+n, self.len)
		r = self.buf[self.pos:newpos]
		self.pos = newpos
		return r
	def close(self):
		# Drop the buffer so a closed tokenizer cannot be used by accident.
		if not self.closed:
			self.closed = True
			del self.buf, self.pos
	def getnexttoken(self,
			# localize some stuff, for performance
			len=len,
			ps_special=ps_special,
			stringmatch=stringRE.match,
			hexstringmatch=hexstringRE.match,
			commentmatch=commentRE.match,
			endmatch=endofthingRE.match):
		"""Return the next (tokentype, token) pair, or (None, None) at EOF.

		tokentype is the name of the interpreter handler method
		('do_special', 'do_comment', 'do_string', 'do_hexstring',
		'do_literal') or '' for ordinary executable tokens.
		"""
		self.skipwhite()
		if self.pos >= self.len:
			return None, None
		pos = self.pos
		buf = self.buf
		# Indexing bytes yields an int on py3; normalize to a 1-byte string.
		char = bytechr(byteord(buf[pos]))
		if char in ps_special:
			if char in b'{}[]':
				tokentype = 'do_special'
				token = char
			elif char == b'%':
				tokentype = 'do_comment'
				_, nextpos = commentmatch(buf, pos).span()
				token = buf[pos:nextpos]
			elif char == b'(':
				tokentype = 'do_string'
				m = stringmatch(buf, pos)
				if m is None:
					raise PSTokenError('bad string at character %d' % pos)
				_, nextpos = m.span()
				token = buf[pos:nextpos]
			elif char == b'<':
				tokentype = 'do_hexstring'
				m = hexstringmatch(buf, pos)
				if m is None:
					raise PSTokenError('bad hexstring at character %d' % pos)
				_, nextpos = m.span()
				token = buf[pos:nextpos]
			else:
				raise PSTokenError('bad token at character %d' % pos)
		else:
			if char == b'/':
				# Literal name: the leading '/' is kept in the token.
				tokentype = 'do_literal'
				m = endmatch(buf, pos+1)
			else:
				tokentype = ''
				m = endmatch(buf, pos)
			if m is None:
				raise PSTokenError('bad token at character %d' % pos)
			_, nextpos = m.span()
			token = buf[pos:nextpos]
		self.pos = pos + len(token)
		token = tostr(token, encoding=self.encoding)
		return tokentype, token
	def skipwhite(self, whitematch=skipwhiteRE.match):
		# Advance self.pos past any whitespace.
		_, nextpos = whitematch(self.buf, self.pos).span()
		self.pos = nextpos
	def starteexec(self):
		"""Switch to reading the eexec-encrypted portion of the stream.

		The undecrypted remainder is saved in self.dirtybuf so stopeexec()
		can restore it; self.pos = 4 skips the four leading bytes of the
		decrypted data (eexec padding).
		"""
		self.pos = self.pos + 1
		self.dirtybuf = self.buf[self.pos:]
		self.buf, R = eexec.decrypt(self.dirtybuf, 55665)
		self.len = len(self.buf)
		self.pos = 4
	def stopeexec(self):
		"""Return to the unencrypted buffer saved by starteexec()."""
		if not hasattr(self, 'dirtybuf'):
			return
		self.buf = self.dirtybuf
		del self.dirtybuf
class PSInterpreter(PSOperators):
	"""A minimal PostScript interpreter, sufficient for the font programs
	psLib needs to parse.

	State: an operand stack (self.stack), a dictionary stack
	(self.dictstack = [systemdict, userdict]) and a procedure nesting
	counter (self.proclevel).  Operators are inherited from PSOperators
	as ps_* methods and registered into systemdict by fillsystemdict().
	"""
	def __init__(self, encoding="ascii"):
		systemdict = {}
		userdict = {}
		self.encoding = encoding
		self.dictstack = [systemdict, userdict]
		self.stack = []
		self.proclevel = 0
		self.procmark = ps_procmark()
		self.fillsystemdict()
	def fillsystemdict(self):
		# Populate systemdict with built-in names and all ps_* operators.
		systemdict = self.dictstack[0]
		systemdict['['] = systemdict['mark'] = self.mark = ps_mark()
		systemdict[']'] = ps_operator(']', self.do_makearray)
		systemdict['true'] = ps_boolean(1)
		systemdict['false'] = ps_boolean(0)
		systemdict['StandardEncoding'] = ps_array(ps_StandardEncoding)
		systemdict['FontDirectory'] = ps_dict({})
		self.suckoperators(systemdict, self.__class__)
	def suckoperators(self, systemdict, klass):
		# Register every callable ps_* method (walking base classes too)
		# as an operator named without the 'ps_' prefix.
		for name in dir(klass):
			attr = getattr(self, name)
			if isinstance(attr, Callable) and name[:3] == 'ps_':
				name = name[3:]
				systemdict[name] = ps_operator(name, attr)
		for baseclass in klass.__bases__:
			self.suckoperators(systemdict, baseclass)
	def interpret(self, data, getattr=getattr):
		"""Tokenize and execute *data*, a PostScript program as bytes."""
		tokenizer = self.tokenizer = PSTokenizer(data, self.encoding)
		getnexttoken = tokenizer.getnexttoken
		do_token = self.do_token
		handle_object = self.handle_object
		try:
			while 1:
				tokentype, token = getnexttoken()
				if not token:
					break
				if tokentype:
					# Special token: dispatch to the matching do_* handler.
					handler = getattr(self, tokentype)
					object = handler(token)
				else:
					object = do_token(token)
				if object is not None:
					handle_object(object)
			tokenizer.close()
			self.tokenizer = None
		except:
			# Log 50 bytes of context on each side of the error position
			# before re-raising.
			if self.tokenizer is not None:
				log.debug(
					'ps error:\n'
					'- - - - - - -\n'
					'%s\n'
					'>>>\n'
					'%s\n'
					'- - - - - - -',
					self.tokenizer.buf[self.tokenizer.pos-50:self.tokenizer.pos],
					self.tokenizer.buf[self.tokenizer.pos:self.tokenizer.pos+50])
			raise
	def handle_object(self, object):
		# Execute the object unless we are inside a procedure definition,
		# the object is literal, or it is itself a procedure body; in those
		# cases it is simply pushed on the operand stack.
		if not (self.proclevel or object.literal or object.type == 'proceduretype'):
			if object.type != 'operatortype':
				object = self.resolve_name(object.value)
			if object.literal:
				self.push(object)
			else:
				if object.type == 'proceduretype':
					self.call_procedure(object)
				else:
					object.function()
		else:
			self.push(object)
	def call_procedure(self, proc):
		# Execute each object in the procedure body in order.
		handle_object = self.handle_object
		for item in proc.value:
			handle_object(item)
	def resolve_name(self, name):
		# Look the name up in the dictionary stack, innermost dict first.
		dictstack = self.dictstack
		for i in range(len(dictstack)-1, -1, -1):
			if name in dictstack[i]:
				return dictstack[i][name]
		raise PSError('name error: ' + str(name))
	def do_token(self, token,
			int=int,
			float=float,
			ps_name=ps_name,
			ps_integer=ps_integer,
			ps_real=ps_real):
		# Convert an ordinary token into a PS object: integer, real,
		# radix number (e.g. "16#FF"), or executable name.
		try:
			num = int(token)
		except (ValueError, OverflowError):
			try:
				num = float(token)
			except (ValueError, OverflowError):
				if '#' in token:
					hashpos = token.find('#')
					try:
						base = int(token[:hashpos])
						num = int(token[hashpos+1:], base)
					except (ValueError, OverflowError):
						return ps_name(token)
					else:
						return ps_integer(num)
				else:
					return ps_name(token)
			else:
				return ps_real(num)
		else:
			return ps_integer(num)
	def do_comment(self, token):
		# Comments produce no object.
		pass
	def do_literal(self, token):
		# Strip the leading '/'.
		return ps_literal(token[1:])
	def do_string(self, token):
		# Strip the surrounding parentheses.
		return ps_string(token[1:-1])
	def do_hexstring(self, token):
		# Decode a <...> hex string; an odd digit count is padded with '0'.
		hexStr = "".join(token[1:-1].split())
		if len(hexStr) % 2:
			hexStr = hexStr + '0'
		cleanstr = []
		for i in range(0, len(hexStr), 2):
			cleanstr.append(chr(int(hexStr[i:i+2], 16)))
		cleanstr = "".join(cleanstr)
		return ps_string(cleanstr)
	def do_special(self, token):
		# Handle procedure braces and array brackets.
		if token == '{':
			self.proclevel = self.proclevel + 1
			return self.procmark
		elif token == '}':
			# Pop objects down to the procedure mark and build the body.
			proc = []
			while 1:
				topobject = self.pop()
				if topobject == self.procmark:
					break
				proc.append(topobject)
			self.proclevel = self.proclevel - 1
			proc.reverse()
			return ps_procedure(proc)
		elif token == '[':
			return self.mark
		elif token == ']':
			return ps_name(']')
		else:
			raise PSTokenError('huh?')
	def push(self, object):
		self.stack.append(object)
	def pop(self, *types):
		# Pop the top object, optionally checking its type.
		stack = self.stack
		if not stack:
			raise PSError('stack underflow')
		object = stack[-1]
		if types:
			if object.type not in types:
				raise PSError('typecheck, expected %s, found %s' % (repr(types), object.type))
		del stack[-1]
		return object
	def do_makearray(self):
		# ']' operator: collect objects down to the mark into an array.
		array = []
		while 1:
			topobject = self.pop()
			if topobject == self.mark:
				break
			array.append(topobject)
		array.reverse()
		self.push(ps_array(array))
	def close(self):
		"""Remove circular references."""
		del self.stack
		del self.dictstack
def unpack_item(item):
	"""Recursively convert a PS object wrapper into plain Python values.

	Dict and list values are unpacked element-wise; a list whose wrapper
	has type 'proceduretype' becomes a tuple.  Any other value is
	returned unchanged.
	"""
	value = item.value
	if type(value) == dict:
		return {key: unpack_item(sub) for key, sub in value.items()}
	if type(value) == list:
		unpacked = [unpack_item(sub) for sub in value]
		return tuple(unpacked) if item.type == 'proceduretype' else unpacked
	return value
def suckfont(data, encoding="ascii"):
	"""Interpret the Type1 font program *data* (bytes) and return its font
	dictionary as plain Python objects (see unpack_item).

	The /FontName found in the source is used to look the font up in the
	interpreter's FontDirectory; if absent or not found, fall back to the
	first font defined (ignoring the dummy Helvetica entry).
	"""
	m = re.search(br"/FontName\s+/([^ \t\n\r]+)\s+def", data)
	if m:
		# Bug fix: the regex yields bytes, but FontDirectory is keyed by
		# str (tokens are decoded by the tokenizer), so a bytes fontName
		# could never match.  Decode with the same encoding the
		# interpreter uses for its tokens.
		fontName = m.group(1).decode(encoding)
	else:
		fontName = None
	interpreter = PSInterpreter(encoding=encoding)
	interpreter.interpret(b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop")
	interpreter.interpret(data)
	fontdir = interpreter.dictstack[0]['FontDirectory'].value
	if fontName in fontdir:
		rawfont = fontdir[fontName]
	else:
		# fall back, in case fontName wasn't found
		fontNames = list(fontdir.keys())
		if len(fontNames) > 1:
			fontNames.remove("Helvetica")
		fontNames.sort()
		rawfont = fontdir[fontNames[0]]
	interpreter.close()
	return unpack_item(rawfont)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/misc/psLib.py",
"copies": "5",
"size": "9568",
"license": "apache-2.0",
"hash": -7469683989417939000,
"line_mean": 23.9166666667,
"line_max": 94,
"alpha_frac": 0.6484113712,
"autogenerated": false,
"ratio": 2.8510131108462455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5999424482046245,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import bytechr, byteord, strjoin
from fontTools.misc import sstruct
from . import DefaultTable
import array
from collections.abc import Mapping
hdmxHeaderFormat = """
> # big endian!
version: H
numRecords: H
recordSize: l
"""
class _GlyphnamedList(Mapping):
def __init__(self, reverseGlyphOrder, data):
self._array = data
self._map = dict(reverseGlyphOrder)
def __getitem__(self, k):
return self._array[self._map[k]]
def __len__(self):
return len(self._map)
def __iter__(self):
return iter(self._map)
def keys(self):
return self._map.keys()
class table__h_d_m_x(DefaultTable.DefaultTable):
	"""The hdmx (horizontal device metrics) table: per-ppem-size records of
	pre-computed advance widths, one byte per glyph, stored in self.hdmx as
	{ppem: widths-mapping}."""
	def decompile(self, data, ttFont):
		numGlyphs = ttFont['maxp'].numGlyphs
		glyphOrder = ttFont.getGlyphOrder()
		dummy, data = sstruct.unpack2(hdmxHeaderFormat, data, self)
		self.hdmx = {}
		for i in range(self.numRecords):
			# Record layout: ppem byte, max-width byte, then one width
			# byte per glyph (padded to recordSize).
			ppem = byteord(data[0])
			maxSize = byteord(data[1])
			widths = _GlyphnamedList(ttFont.getReverseGlyphMap(), array.array("B", data[2:2+numGlyphs]))
			self.hdmx[ppem] = widths
			data = data[self.recordSize:]
		assert len(data) == 0, "too much hdmx data"
	def compile(self, ttFont):
		self.version = 0
		numGlyphs = ttFont['maxp'].numGlyphs
		glyphOrder = ttFont.getGlyphOrder()
		# Records are padded to a multiple of four bytes.
		self.recordSize = 4 * ((2 + numGlyphs + 3) // 4)
		pad = (self.recordSize - 2 - numGlyphs) * b"\0"
		self.numRecords = len(self.hdmx)
		data = sstruct.pack(hdmxHeaderFormat, self)
		items = sorted(self.hdmx.items())
		for ppem, widths in items:
			data = data + bytechr(ppem) + bytechr(max(widths.values()))
			for glyphID in range(len(glyphOrder)):
				width = widths[glyphOrder[glyphID]]
				data = data + bytechr(width)
			data = data + pad
		return data
	def toXML(self, writer, ttFont):
		# Dump as a compact table: one header row of ppem sizes, then one
		# row per glyph (sorted by name), each terminated with ';'.
		writer.begintag("hdmxData")
		writer.newline()
		ppems = sorted(self.hdmx.keys())
		records = []
		format = ""
		for ppem in ppems:
			widths = self.hdmx[ppem]
			records.append(widths)
			format = format + "%4d"
		glyphNames = ttFont.getGlyphOrder()[:]
		glyphNames.sort()
		maxNameLen = max(map(len, glyphNames))
		format = "%" + repr(maxNameLen) + 's:' + format + ' ;'
		writer.write(format % (("ppem",) + tuple(ppems)))
		writer.newline()
		writer.newline()
		for glyphName in glyphNames:
			row = []
			for ppem in ppems:
				widths = self.hdmx[ppem]
				row.append(widths[glyphName])
			# ';' is the row terminator, so escape it in glyph names.
			if ";" in glyphName:
				glyphName = "\\x3b".join(glyphName.split(";"))
			writer.write(format % ((glyphName,) + tuple(row)))
			writer.newline()
		writer.endtag("hdmxData")
		writer.newline()
	def fromXML(self, name, attrs, content, ttFont):
		# Parse the table format produced by toXML() back into self.hdmx.
		if name != "hdmxData":
			return
		content = strjoin(content)
		lines = content.split(";")
		topRow = lines[0].split()
		assert topRow[0] == "ppem:", "illegal hdmx format"
		ppems = list(map(int, topRow[1:]))
		self.hdmx = hdmx = {}
		for ppem in ppems:
			hdmx[ppem] = {}
		lines = (line.split() for line in lines[1:])
		for line in lines:
			if not line:
				continue
			assert line[0][-1] == ":", "illegal hdmx format"
			glyphName = line[0][:-1]
			# Undo the "\x3b" escaping applied by toXML().
			if "\\" in glyphName:
				from fontTools.misc.textTools import safeEval
				glyphName = safeEval('"""' + glyphName + '"""')
			line = list(map(int, line[1:]))
			assert len(line) == len(ppems), "illegal hdmx format"
			for i in range(len(ppems)):
				hdmx[ppems[i]][glyphName] = line[i]
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_h_d_m_x.py",
"copies": "5",
"size": "3335",
"license": "apache-2.0",
"hash": -7135055486401276000,
"line_mean": 27.75,
"line_max": 95,
"alpha_frac": 0.6554722639,
"autogenerated": false,
"ratio": 2.7653399668325043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03147134479688017,
"num_lines": 116
} |
from fontTools.misc.py23 import bytechr, byteord, tobytes, tostr
from fontTools import ttLib
from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval, readHex
from . import DefaultTable
import sys
import struct
import array
postFormat = """
>
formatType: 16.16F
italicAngle: 16.16F # italic angle in degrees
underlinePosition: h
underlineThickness: h
isFixedPitch: L
minMemType42: L # minimum memory if TrueType font is downloaded
maxMemType42: L # maximum memory if TrueType font is downloaded
minMemType1: L # minimum memory if Type1 font is downloaded
maxMemType1: L # maximum memory if Type1 font is downloaded
"""
postFormatSize = sstruct.calcsize(postFormat)
class table__p_o_s_t(DefaultTable.DefaultTable):
	"""The post table: PostScript glyph names plus global font metrics.

	Formats 1.0, 2.0, 3.0 and 4.0 are supported; the glyph names end up
	in self.glyphOrder (None for format 3.0, which stores no names).
	"""
	def decompile(self, data, ttFont):
		sstruct.unpack(postFormat, data[:postFormatSize], self)
		data = data[postFormatSize:]
		if self.formatType == 1.0:
			self.decode_format_1_0(data, ttFont)
		elif self.formatType == 2.0:
			self.decode_format_2_0(data, ttFont)
		elif self.formatType == 3.0:
			self.decode_format_3_0(data, ttFont)
		elif self.formatType == 4.0:
			self.decode_format_4_0(data, ttFont)
		else:
			# unsupported format
			raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType)
	def compile(self, ttFont):
		data = sstruct.pack(postFormat, self)
		if self.formatType == 1.0:
			pass # we're done
		elif self.formatType == 2.0:
			data = data + self.encode_format_2_0(ttFont)
		elif self.formatType == 3.0:
			pass # we're done
		elif self.formatType == 4.0:
			data = data + self.encode_format_4_0(ttFont)
		else:
			# unsupported format
			raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType)
		return data
	def getGlyphOrder(self):
		"""This function will get called by a ttLib.TTFont instance.
		Do not call this function yourself, use TTFont().getGlyphOrder()
		or its relatives instead!
		"""
		if not hasattr(self, "glyphOrder"):
			raise ttLib.TTLibError("illegal use of getGlyphOrder()")
		glyphOrder = self.glyphOrder
		del self.glyphOrder
		return glyphOrder
	def decode_format_1_0(self, data, ttFont):
		# Format 1.0: glyphs are exactly the standard Macintosh set.
		self.glyphOrder = standardGlyphOrder[:ttFont["maxp"].numGlyphs]
	def decode_format_2_0(self, data, ttFont):
		# Format 2.0: an index per glyph; 0-257 select standard names,
		# 258+ select entries from the Pascal-string list that follows.
		numGlyphs, = struct.unpack(">H", data[:2])
		numGlyphs = int(numGlyphs)
		if numGlyphs > ttFont['maxp'].numGlyphs:
			# Assume the numGlyphs field is bogus, so sync with maxp.
			# I've seen this in one font, and if the assumption is
			# wrong elsewhere, well, so be it: it's hard enough to
			# work around _one_ non-conforming post format...
			numGlyphs = ttFont['maxp'].numGlyphs
		data = data[2:]
		indices = array.array("H")
		indices.frombytes(data[:2*numGlyphs])
		if sys.byteorder != "big": indices.byteswap()
		data = data[2*numGlyphs:]
		self.extraNames = extraNames = unpackPStrings(data)
		self.glyphOrder = glyphOrder = [""] * int(ttFont['maxp'].numGlyphs)
		for glyphID in range(numGlyphs):
			index = indices[glyphID]
			if index > 257:
				try:
					name = extraNames[index-258]
				except IndexError:
					name = ""
			else:
				# fetch names from standard list
				name = standardGlyphOrder[index]
			glyphOrder[glyphID] = name
		self.build_psNameMapping(ttFont)
	def build_psNameMapping(self, ttFont):
		# Make every glyph name unique; remember original ps names of the
		# renamed glyphs in self.mapping.
		mapping = {}
		allNames = {}
		for i in range(ttFont['maxp'].numGlyphs):
			glyphName = psName = self.glyphOrder[i]
			if glyphName == "":
				glyphName = "glyph%.5d" % i
			if glyphName in allNames:
				# make up a new glyphName that's unique
				n = allNames[glyphName]
				while (glyphName + "#" + str(n)) in allNames:
					n += 1
				allNames[glyphName] = n + 1
				glyphName = glyphName + "#" + str(n)
			self.glyphOrder[i] = glyphName
			allNames[glyphName] = 1
			if glyphName != psName:
				mapping[glyphName] = psName
		self.mapping = mapping
	def decode_format_3_0(self, data, ttFont):
		# Setting self.glyphOrder to None will cause the TTFont object
		# try and construct glyph names from a Unicode cmap table.
		self.glyphOrder = None
	def decode_format_4_0(self, data, ttFont):
		# Format 4.0 (Apple): one Unicode value per glyph; names are
		# synthesized from the AGL or as "uniXXXX".
		from fontTools import agl
		numGlyphs = ttFont['maxp'].numGlyphs
		indices = array.array("H")
		indices.frombytes(data)
		if sys.byteorder != "big": indices.byteswap()
		# In some older fonts, the size of the post table doesn't match
		# the number of glyphs. Sometimes it's bigger, sometimes smaller.
		self.glyphOrder = glyphOrder = [''] * int(numGlyphs)
		for i in range(min(len(indices),numGlyphs)):
			if indices[i] == 0xFFFF:
				self.glyphOrder[i] = ''
			elif indices[i] in agl.UV2AGL:
				self.glyphOrder[i] = agl.UV2AGL[indices[i]]
			else:
				self.glyphOrder[i] = "uni%04X" % indices[i]
		self.build_psNameMapping(ttFont)
	def encode_format_2_0(self, ttFont):
		# Inverse of decode_format_2_0: build the index array and the
		# extra-names string list.
		numGlyphs = ttFont['maxp'].numGlyphs
		glyphOrder = ttFont.getGlyphOrder()
		assert len(glyphOrder) == numGlyphs
		indices = array.array("H")
		extraDict = {}
		extraNames = self.extraNames = [
			n for n in self.extraNames if n not in standardGlyphOrder]
		for i in range(len(extraNames)):
			extraDict[extraNames[i]] = i
		for glyphID in range(numGlyphs):
			glyphName = glyphOrder[glyphID]
			if glyphName in self.mapping:
				psName = self.mapping[glyphName]
			else:
				psName = glyphName
			if psName in extraDict:
				index = 258 + extraDict[psName]
			elif psName in standardGlyphOrder:
				index = standardGlyphOrder.index(psName)
			else:
				index = 258 + len(extraNames)
				extraDict[psName] = len(extraNames)
				extraNames.append(psName)
			indices.append(index)
		if sys.byteorder != "big": indices.byteswap()
		return struct.pack(">H", numGlyphs) + indices.tobytes() + packPStrings(extraNames)
	def encode_format_4_0(self, ttFont):
		# Inverse of decode_format_4_0; glyphs with unmappable names get
		# the sentinel 0xFFFF.
		from fontTools import agl
		numGlyphs = ttFont['maxp'].numGlyphs
		glyphOrder = ttFont.getGlyphOrder()
		assert len(glyphOrder) == numGlyphs
		indices = array.array("H")
		for glyphID in glyphOrder:
			glyphID = glyphID.split('#')[0]
			if glyphID in agl.AGL2UV:
				indices.append(agl.AGL2UV[glyphID])
			elif len(glyphID) == 7 and glyphID[:3] == 'uni':
				indices.append(int(glyphID[3:],16))
			else:
				indices.append(0xFFFF)
		if sys.byteorder != "big": indices.byteswap()
		return indices.tobytes()
	def toXML(self, writer, ttFont):
		formatstring, names, fixes = sstruct.getformat(postFormat)
		for name in names:
			value = getattr(self, name)
			writer.simpletag(name, value=value)
			writer.newline()
		if hasattr(self, "mapping"):
			writer.begintag("psNames")
			writer.newline()
			writer.comment("This file uses unique glyph names based on the information\n"
						"found in the 'post' table. Since these names might not be unique,\n"
						"we have to invent artificial names in case of clashes. In order to\n"
						"be able to retain the original information, we need a name to\n"
						"ps name mapping for those cases where they differ. That's what\n"
						"you see below.\n")
			writer.newline()
			items = sorted(self.mapping.items())
			for name, psName in items:
				writer.simpletag("psName", name=name, psName=psName)
				writer.newline()
			writer.endtag("psNames")
			writer.newline()
		if hasattr(self, "extraNames"):
			writer.begintag("extraNames")
			writer.newline()
			writer.comment("following are the name that are not taken from the standard Mac glyph order")
			writer.newline()
			for name in self.extraNames:
				writer.simpletag("psName", name=name)
				writer.newline()
			writer.endtag("extraNames")
			writer.newline()
		if hasattr(self, "data"):
			writer.begintag("hexdata")
			writer.newline()
			writer.dumphex(self.data)
			writer.endtag("hexdata")
			writer.newline()
	def fromXML(self, name, attrs, content, ttFont):
		# Header fields arrive as simple value attributes; psNames,
		# extraNames and hexdata have their own container elements.
		if name not in ("psNames", "extraNames", "hexdata"):
			setattr(self, name, safeEval(attrs["value"]))
		elif name == "psNames":
			self.mapping = {}
			for element in content:
				if not isinstance(element, tuple):
					continue
				name, attrs, content = element
				if name == "psName":
					self.mapping[attrs["name"]] = attrs["psName"]
		elif name == "extraNames":
			self.extraNames = []
			for element in content:
				if not isinstance(element, tuple):
					continue
				name, attrs, content = element
				if name == "psName":
					self.extraNames.append(attrs["name"])
		else:
			self.data = readHex(content)
def unpackPStrings(data):
	"""Parse a run of Pascal strings (length byte + latin-1 payload)."""
	result = []
	pos = 0
	end = len(data)
	while pos < end:
		n = byteord(data[pos])
		pos += 1
		result.append(tostr(data[pos:pos+n], encoding="latin1"))
		pos += n
	return result
def packPStrings(strings):
	"""Serialize *strings* as Pascal strings: length byte + latin-1 bytes."""
	pieces = []
	for s in strings:
		pieces.append(bytechr(len(s)) + tobytes(s, encoding="latin1"))
	return b"".join(pieces)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_p_o_s_t.py",
"copies": "3",
"size": "8760",
"license": "apache-2.0",
"hash": -7654716654882433000,
"line_mean": 31.4444444444,
"line_max": 96,
"alpha_frac": 0.6926940639,
"autogenerated": false,
"ratio": 3.0448383733055264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5237532437205527,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import bytechr, byteord, tobytes, tostr
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from . import DefaultTable
SINGFormat = """
> # big endian
tableVersionMajor: H
tableVersionMinor: H
glyphletVersion: H
permissions: h
mainGID: H
unitsPerEm: H
vertAdvance: h
vertOrigin: h
uniqueName: 28s
METAMD5: 16s
nameLength: 1s
"""
# baseGlyphName is a byte string which follows the record above.
class table_S_I_N_G_(DefaultTable.DefaultTable):
	"""The SING glyphlet table."""

	dependencies = []

	def decompile(self, data, ttFont):
		dummy, rest = sstruct.unpack2(SINGFormat, data, self)
		self.uniqueName = self.decompileUniqueName(self.uniqueName)
		self.nameLength = byteord(self.nameLength)
		assert len(rest) == self.nameLength
		self.baseGlyphName = tostr(rest)
		# Store the MD5 hash textually as "[0x.., 0x.., ...]" so it
		# round-trips through TTX.
		rawMETAMD5 = self.METAMD5
		self.METAMD5 = "[" + hex(byteord(self.METAMD5[0]))
		for char in rawMETAMD5[1:]:
			self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char))
		self.METAMD5 = self.METAMD5 + "]"

	def decompileUniqueName(self, data):
		"""Decode the null-padded uniqueName field to a printable string.

		Printable ASCII bytes are kept literally; any other byte becomes a
		three-digit octal escape such as "\\012".
		"""
		name = ""
		for char in data:
			val = byteord(char)
			if val == 0:
				break
			# Bug fix: this condition used to be "(val > 31) or (val < 128)",
			# which is true for every byte and made the escape branch dead.
			if (val > 31) and (val < 128):
				name += chr(val)
			else:
				# Bug fix: oct() returns "0o..." on Python 3 and the old
				# prefix-chopping/zfill logic was broken; format exactly
				# three octal digits instead.
				name += "\\" + ("%03o" % val)
		return name

	def compile(self, ttFont):
		d = self.__dict__.copy()
		d["nameLength"] = bytechr(len(self.baseGlyphName))
		d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28)
		# NOTE(review): eval() of the METAMD5 string mirrors the original
		# TTX round-trip format; TTX input is treated as trusted here.
		METAMD5List = eval(self.METAMD5)
		d["METAMD5"] = b""
		for val in METAMD5List:
			d["METAMD5"] += bytechr(val)
		assert (len(d["METAMD5"]) == 16), "Failed to pack 16 byte MD5 hash in SING table"
		data = sstruct.pack(SINGFormat, d)
		data = data + tobytes(self.baseGlyphName)
		return data

	def compilecompileUniqueName(self, name, length):
		"""Truncate or null-pad *name* to exactly *length* characters."""
		nameLen = len(name)
		if length <= nameLen:
			# Truncate, keeping room for a terminating null.
			name = name[:length-1] + "\000"
		else:
			# Bug fix: the pad count used to be (nameLen - length), which
			# is negative here and produced no padding at all.
			name += (length - nameLen) * "\000"
		return name

	def toXML(self, writer, ttFont):
		writer.comment("Most of this table will be recalculated by the compiler")
		writer.newline()
		formatstring, names, fixes = sstruct.getformat(SINGFormat)
		for name in names:
			value = getattr(self, name)
			writer.simpletag(name, value=value)
			writer.newline()
		writer.simpletag("baseGlyphName", value=self.baseGlyphName)
		writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		value = attrs["value"]
		# String-valued fields are stored verbatim; everything else is
		# parsed with safeEval.
		if name in ["uniqueName", "METAMD5", "baseGlyphName"]:
			setattr(self, name, value)
		else:
			setattr(self, name, safeEval(value))
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S_I_N_G_.py",
"copies": "5",
"size": "2692",
"license": "apache-2.0",
"hash": -2263341435637270800,
"line_mean": 27.6382978723,
"line_max": 83,
"alpha_frac": 0.6831352155,
"autogenerated": false,
"ratio": 2.8547189819724283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6037854197472429,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import byteord
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr
from fontTools.misc.textTools import safeEval
# from itertools import *
from . import DefaultTable
from . import grUtils
from array import array
from functools import reduce
import struct, re, sys
Silf_hdr_format = '''
>
version: 16.16F
'''
Silf_hdr_format_3 = '''
>
version: 16.16F
compilerVersion: L
numSilf: H
x
x
'''
Silf_part1_format_v3 = '''
>
ruleVersion: 16.16F
passOffset: H
pseudosOffset: H
'''
Silf_part1_format = '''
>
maxGlyphID: H
extraAscent: h
extraDescent: h
numPasses: B
iSubst: B
iPos: B
iJust: B
iBidi: B
flags: B
maxPreContext: B
maxPostContext: B
attrPseudo: B
attrBreakWeight: B
attrDirectionality: B
attrMirroring: B
attrSkipPasses: B
numJLevels: B
'''
Silf_justify_format = '''
>
attrStretch: B
attrShrink: B
attrStep: B
attrWeight: B
runto: B
x
x
x
'''
Silf_part2_format = '''
>
numLigComp: H
numUserDefn: B
maxCompPerLig: B
direction: B
attCollisions: B
x
x
x
numCritFeatures: B
'''
Silf_pseudomap_format = '''
>
unicode: L
nPseudo: H
'''
Silf_pseudomap_format_h = '''
>
unicode: H
nPseudo: H
'''
Silf_classmap_format = '''
>
numClass: H
numLinear: H
'''
Silf_lookupclass_format = '''
>
numIDs: H
searchRange: H
entrySelector: H
rangeShift: H
'''
Silf_lookuppair_format = '''
>
glyphId: H
index: H
'''
Silf_pass_format = '''
>
flags: B
maxRuleLoop: B
maxRuleContext: B
maxBackup: B
numRules: H
fsmOffset: H
pcCode: L
rcCode: L
aCode: L
oDebug: L
numRows: H
numTransitional: H
numSuccess: H
numColumns: H
'''
aCode_info = (
("NOP", 0),
("PUSH_BYTE", "b"),
("PUSH_BYTE_U", "B"),
("PUSH_SHORT", ">h"),
("PUSH_SHORT_U", ">H"),
("PUSH_LONG", ">L"),
("ADD", 0),
("SUB", 0),
("MUL", 0),
("DIV", 0),
("MIN", 0),
("MAX", 0),
("NEG", 0),
("TRUNC8", 0),
("TRUNC16", 0),
("COND", 0),
("AND", 0), # x10
("OR", 0),
("NOT", 0),
("EQUAL", 0),
("NOT_EQ", 0),
("LESS", 0),
("GTR", 0),
("LESS_EQ", 0),
("GTR_EQ", 0),
("NEXT", 0),
("NEXT_N", "b"),
("COPY_NEXT", 0),
("PUT_GLYPH_8BIT_OBS", "B"),
("PUT_SUBS_8BIT_OBS", "bBB"),
("PUT_COPY", "b"),
("INSERT", 0),
("DELETE", 0), # x20
("ASSOC", -1),
("CNTXT_ITEM", "bB"),
("ATTR_SET", "B"),
("ATTR_ADD", "B"),
("ATTR_SUB", "B"),
("ATTR_SET_SLOT", "B"),
("IATTR_SET_SLOT", "BB"),
("PUSH_SLOT_ATTR", "Bb"),
("PUSH_GLYPH_ATTR_OBS", "Bb"),
("PUSH_GLYPH_METRIC", "Bbb"),
("PUSH_FEAT", "Bb"),
("PUSH_ATT_TO_GATTR_OBS", "Bb"),
("PUSH_ATT_TO_GLYPH_METRIC", "Bbb"),
("PUSH_ISLOT_ATTR", "Bbb"),
("PUSH_IGLYPH_ATTR", "Bbb"),
("POP_RET", 0), # x30
("RET_ZERO", 0),
("RET_TRUE", 0),
("IATTR_SET", "BB"),
("IATTR_ADD", "BB"),
("IATTR_SUB", "BB"),
("PUSH_PROC_STATE", "B"),
("PUSH_VERSION", 0),
("PUT_SUBS", ">bHH"),
("PUT_SUBS2", 0),
("PUT_SUBS3", 0),
("PUT_GLYPH", ">H"),
("PUSH_GLYPH_ATTR", ">Hb"),
("PUSH_ATT_TO_GLYPH_ATTR", ">Hb"),
("BITOR", 0),
("BITAND", 0),
("BITNOT", 0), # x40
("BITSET", ">HH"),
("SET_FEAT", "Bb")
)
aCode_map = dict([(x[0], (i, x[1])) for i,x in enumerate(aCode_info)])
def disassemble(aCode):
    """Disassemble a Graphite action-code byte string.

    Returns a list of human-readable instruction strings: the opcode
    name, followed by its decoded operands in parentheses when the
    opcode takes any.
    """
    codelen = len(aCode)
    pc = 0
    res = []
    while pc < codelen:
        opcode = byteord(aCode[pc:pc+1])
        # Valid indices into aCode_info are 0 .. len(aCode_info)-1, so
        # anything >= the table length falls back to NOP.  (The original
        # test used '>', which let opcode == len(aCode_info) through and
        # raised IndexError on the lookup below.)
        if opcode >= len(aCode_info):
            instr = aCode_info[0]
        else:
            instr = aCode_info[opcode]
        pc += 1
        # Truncated stream: an operand-taking opcode with no bytes left.
        if instr[1] != 0 and pc >= codelen:
            return res
        if instr[1] == -1:
            # Variable-length operand list: first byte is the count.
            count = byteord(aCode[pc])
            fmt = "%dB" % count
            pc += 1
        elif instr[1] == 0:
            fmt = ""
        else:
            fmt = instr[1]
        if fmt == "":
            res.append(instr[0])
            continue
        parms = struct.unpack_from(fmt, aCode[pc:])
        res.append(instr[0] + "(" + ", ".join(map(str, parms)) + ")")
        pc += struct.calcsize(fmt)
    return res
# Matches one assembly line: an instruction name, optionally followed by
# a parenthesised, comma-separated operand list.
instre = re.compile(r"^\s*([^(]+)\s*(?:\(([^)]+)\))?")
def assemble(instrs):
    """Assemble instruction strings (as produced by disassemble) back
    into a Graphite action-code byte string.  Unrecognised lines are
    silently skipped."""
    out = bytearray()
    for line in instrs:
        m = instre.match(line)
        if m is None or m.group(1) not in aCode_map:
            continue
        opcode, parmfmt = aCode_map[m.group(1)]
        out += struct.pack("B", opcode)
        if not m.group(2):
            continue
        if parmfmt == 0:
            # Opcode takes no operands; ignore any that were written.
            continue
        parms = [int(p) for p in re.split(r",\s*", m.group(2))]
        if parmfmt == -1:
            # Variable-length operand list: emit a count byte first.
            count = len(parms)
            out += struct.pack(("%dB" % (count + 1)), count, *parms)
        else:
            out += struct.pack(parmfmt, *parms)
    return bytes(out)
def writecode(tag, writer, instrs):
    """Emit the binary action code *instrs* as disassembled text lines
    inside an XML element named *tag*."""
    writer.begintag(tag)
    writer.newline()
    for asm_line in disassemble(instrs):
        writer.write(asm_line)
        writer.newline()
    writer.endtag(tag)
    writer.newline()
def readcode(content):
    """Parse action-code text out of XML *content* and assemble it back
    into a binary action-code byte string."""
    stripped = (line.strip() for line in content_string(content).split('\n'))
    return assemble([line for line in stripped if line])
# Attribute-name groups used by writesimple()/getSimple() when
# (de)serialising a Silf subtable and its passes to XML; each tuple
# matches one simpletag element written by Silf.toXML() / Pass.toXML().
attrs_info=('flags', 'extraAscent', 'extraDescent', 'maxGlyphID',
    'numLigComp', 'numUserDefn', 'maxCompPerLig', 'direction', 'lbGID')
# Pass-index fields (iSubst, iPos, iJust, iBidi).
attrs_passindexes = ('iSubst', 'iPos', 'iJust', 'iBidi')
attrs_contexts = ('maxPreContext', 'maxPostContext')
attrs_attributes = ('attrPseudo', 'attrBreakWeight', 'attrDirectionality',
    'attrMirroring', 'attrSkipPasses', 'attCollisions')
# Per-pass attributes ('info' element of a pass).
pass_attrs_info = ('flags', 'maxRuleLoop', 'maxRuleContext', 'maxBackup',
    'minRulePreContext', 'maxRulePreContext', 'collisionThreshold')
# FSM geometry of a pass ('fsminfo' element).
pass_attrs_fsm = ('numRows', 'numTransitional', 'numSuccess', 'numColumns')
def writesimple(tag, self, writer, *attrkeys):
    """Write one empty XML element named *tag* whose attributes are the
    values of *attrkeys* looked up on *self*."""
    attrs = {key: getattr(self, key) for key in attrkeys}
    writer.simpletag(tag, **attrs)
    writer.newline()
def getSimple(self, attrs, *attr_list):
    """Copy each attribute named in *attr_list* from the XML attribute
    dict *attrs* onto *self*, converted to int.  Missing names are
    silently skipped."""
    for key in attr_list:
        if key not in attrs:
            continue
        setattr(self, key, int(safeEval(attrs[key])))
def content_string(contents):
    """Concatenate the plain-text pieces of XML *contents*, skipping
    nested-element tuples, and strip surrounding whitespace."""
    return "".join(c for c in contents if not isinstance(c, tuple)).strip()
def wrapline(writer, dat, length=80):
    """Write the tokens in *dat* space-separated, wrapping to a new line
    once the accumulated text exceeds *length* characters.  The check
    happens before each token is added, so a line may run slightly past
    *length* by one token."""
    line = ""
    for token in dat:
        if len(line) > length:
            writer.write(line[:-1])  # drop the trailing separator space
            writer.newline()
            line = ""
        line += token + " "
    if line:
        writer.write(line[:-1])
        writer.newline()
class _Object() :
    # Bare attribute container: used as the target object for
    # sstruct.unpack2()/sstruct.unpack() when decompiling sub-records
    # (justification levels, pseudo-map entries, ...).
    pass
class table_S__i_l_f(DefaultTable.DefaultTable):
    '''Silf table support'''

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.silfs = []

    def decompile(self, data, ttFont):
        """Parse the binary table: header (three layouts depending on
        version), per-subtable offset array, then one Silf subtable per
        offset."""
        sstruct.unpack2(Silf_hdr_format, data, self)
        self.version = float(floatToFixedToStr(self.version, precisionBits=16))
        if self.version >= 5.0:
            # Version 5+ tables may be compressed; decompress first,
            # then re-read the full header from the expanded data.
            (data, self.scheme) = grUtils.decompress(data)
            sstruct.unpack2(Silf_hdr_format_3, data, self)
            base = sstruct.calcsize(Silf_hdr_format_3)
        elif self.version < 3.0:
            # Pre-3.0 headers carry no compiler version; numSilf sits at
            # offset 4.  Unpack to a scalar — the original kept the whole
            # 1-tuple returned by struct.unpack, which only worked by
            # accident of the '%' tuple-formatting below.
            (self.numSilf,) = struct.unpack('>H', data[4:6])
            self.scheme = 0
            self.compilerVersion = 0
            base = 8
        else:
            self.scheme = 0
            sstruct.unpack2(Silf_hdr_format_3, data, self)
            base = sstruct.calcsize(Silf_hdr_format_3)

        silfoffsets = struct.unpack_from(('>%dL' % self.numSilf), data[base:])
        for offset in silfoffsets:
            s = Silf()
            self.silfs.append(s)
            s.decompile(data[offset:], ttFont, self.version)

    def compile(self, ttFont):
        """Serialise header + offset array + subtable data; compress the
        result for version >= 5.0."""
        self.numSilf = len(self.silfs)
        if self.version < 3.0:
            hdr = sstruct.pack(Silf_hdr_format, self)
            hdr += struct.pack(">HH", self.numSilf, 0)
        else:
            hdr = sstruct.pack(Silf_hdr_format_3, self)
        offset = len(hdr) + 4 * self.numSilf
        data = b""
        for s in self.silfs:
            hdr += struct.pack(">L", offset)
            subdata = s.compile(ttFont, self.version)
            offset += len(subdata)
            data += subdata
        if self.version >= 5.0:
            return grUtils.compress(self.scheme, hdr + data)
        return hdr + data

    def toXML(self, writer, ttFont):
        writer.comment('Attributes starting with _ are informative only')
        writer.newline()
        writer.simpletag('version', version=self.version,
            compilerVersion=self.compilerVersion, compressionScheme=self.scheme)
        writer.newline()
        for s in self.silfs:
            writer.begintag('silf')
            writer.newline()
            s.toXML(writer, ttFont, self.version)
            writer.endtag('silf')
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == 'version':
            self.scheme = int(safeEval(attrs['compressionScheme']))
            self.version = float(safeEval(attrs['version']))
            self.compilerVersion = int(safeEval(attrs['compilerVersion']))
            return
        if name == 'silf':
            s = Silf()
            self.silfs.append(s)
            for element in content:
                if not isinstance(element, tuple): continue
                tag, attrs, subcontent = element
                s.fromXML(tag, attrs, subcontent, ttFont, self.version)
class Silf(object):
    '''A particular Silf subtable'''

    def __init__(self):
        self.passes = []
        self.scriptTags = []
        self.critFeatures = []
        self.jLevels = []
        self.pMap = {}

    def decompile(self, data, ttFont, version=2.0):
        """Parse one Silf subtable: fixed parts, justification levels,
        critical features, script tags, pass offsets, pseudo-glyph map,
        classes, and finally the passes themselves."""
        if version >= 3.0:
            _, data = sstruct.unpack2(Silf_part1_format_v3, data, self)
            self.ruleVersion = float(floatToFixedToStr(self.ruleVersion, precisionBits=16))
        _, data = sstruct.unpack2(Silf_part1_format, data, self)
        for jlevel in range(self.numJLevels):
            j, data = sstruct.unpack2(Silf_justify_format, data, _Object())
            self.jLevels.append(j)
        _, data = sstruct.unpack2(Silf_part2_format, data, self)
        if self.numCritFeatures:
            self.critFeatures = struct.unpack_from(('>%dH' % self.numCritFeatures), data)
        # +1 skips the reserved byte that compile() writes after the
        # critical-feature list (the 0 in struct.pack("BB", 0, ...)).
        data = data[self.numCritFeatures * 2 + 1:]
        (numScriptTag,) = struct.unpack_from('B', data)
        if numScriptTag:
            self.scriptTags = [struct.unpack("4s", data[x:x+4])[0].decode("ascii") for x in range(1, 1 + 4 * numScriptTag, 4)]
        data = data[1 + 4 * numScriptTag:]
        (self.lbGID,) = struct.unpack('>H', data[:2])
        if self.numPasses:
            # numPasses+1 offsets: each pass is delimited by its own
            # offset and the next one's.
            self.oPasses = struct.unpack(('>%dL' % (self.numPasses+1)),
                data[2:6+4*self.numPasses])
        data = data[6 + 4 * self.numPasses:]
        (numPseudo,) = struct.unpack(">H", data[:2])
        for i in range(numPseudo):
            # 8 bytes of search-header (bininfo) precede the map entries.
            if version >= 3.0:
                pseudo = sstruct.unpack(Silf_pseudomap_format, data[8+6*i:14+6*i], _Object())
            else:
                pseudo = sstruct.unpack(Silf_pseudomap_format_h, data[8+4*i:12+4*i], _Object())
            self.pMap[pseudo.unicode] = ttFont.getGlyphName(pseudo.nPseudo)
        data = data[8 + 6 * numPseudo:]
        # Absolute position of `data` within the subtable, needed to
        # convert the absolute oPasses offsets into slices of `data`.
        currpos = (sstruct.calcsize(Silf_part1_format)
            + sstruct.calcsize(Silf_justify_format) * self.numJLevels
            + sstruct.calcsize(Silf_part2_format) + 2 * self.numCritFeatures
            + 1 + 1 + 4 * numScriptTag + 6 + 4 * self.numPasses + 8 + 6 * numPseudo)
        if version >= 3.0:
            currpos += sstruct.calcsize(Silf_part1_format_v3)
        self.classes = Classes()
        self.classes.decompile(data, ttFont, version)
        for i in range(self.numPasses):
            p = Pass()
            self.passes.append(p)
            p.decompile(data[self.oPasses[i]-currpos:self.oPasses[i+1]-currpos],
                ttFont, version)

    def compile(self, ttFont, version=2.0):
        """Serialise this subtable; inverse of decompile()."""
        self.numPasses = len(self.passes)
        self.numJLevels = len(self.jLevels)
        self.numCritFeatures = len(self.critFeatures)
        numPseudo = len(self.pMap)
        data = b""
        if version >= 3.0:
            hdroffset = sstruct.calcsize(Silf_part1_format_v3)
        else:
            hdroffset = 0
        data += sstruct.pack(Silf_part1_format, self)
        for j in self.jLevels:
            data += sstruct.pack(Silf_justify_format, j)
        data += sstruct.pack(Silf_part2_format, self)
        if self.numCritFeatures:
            # BUGFIX: this previously read `self.numCritFeaturs` (typo),
            # raising AttributeError whenever critFeatures was non-empty.
            data += struct.pack((">%dH" % self.numCritFeatures), *self.critFeatures)
        data += struct.pack("BB", 0, len(self.scriptTags))
        if len(self.scriptTags):
            tdata = [struct.pack("4s", x.encode("ascii")) for x in self.scriptTags]
            data += b"".join(tdata)
        data += struct.pack(">H", self.lbGID)
        self.passOffset = len(data)
        data1 = grUtils.bininfo(numPseudo, 6)
        currpos = hdroffset + len(data) + 4 * (self.numPasses + 1)
        self.pseudosOffset = currpos + len(data1)
        for u, p in sorted(self.pMap.items()):
            data1 += struct.pack((">LH" if version >= 3.0 else ">HH"),
                u, ttFont.getGlyphID(p))
        data1 += self.classes.compile(ttFont, version)
        currpos += len(data1)
        data2 = b""
        datao = b""
        for i, p in enumerate(self.passes):
            base = currpos + len(data2)
            datao += struct.pack(">L", base)
            data2 += p.compile(ttFont, base, version)
        # Final sentinel offset: end of the last pass.
        datao += struct.pack(">L", currpos + len(data2))
        if version >= 3.0:
            data3 = sstruct.pack(Silf_part1_format_v3, self)
        else:
            data3 = b""
        return data3 + data + datao + data1 + data2

    def toXML(self, writer, ttFont, version=2.0):
        if version >= 3.0:
            writer.simpletag('version', ruleVersion=self.ruleVersion)
            writer.newline()
        writesimple('info', self, writer, *attrs_info)
        writesimple('passindexes', self, writer, *attrs_passindexes)
        writesimple('contexts', self, writer, *attrs_contexts)
        writesimple('attributes', self, writer, *attrs_attributes)
        if len(self.jLevels):
            writer.begintag('justifications')
            writer.newline()
            jformat, jnames, jfixes = sstruct.getformat(Silf_justify_format)
            for i, j in enumerate(self.jLevels):
                attrs = dict([(k, getattr(j, k)) for k in jnames])
                writer.simpletag('justify', **attrs)
                writer.newline()
            writer.endtag('justifications')
            writer.newline()
        if len(self.critFeatures):
            writer.begintag('critFeatures')
            writer.newline()
            writer.write(" ".join(map(str, self.critFeatures)))
            writer.newline()
            writer.endtag('critFeatures')
            writer.newline()
        if len(self.scriptTags):
            writer.begintag('scriptTags')
            writer.newline()
            writer.write(" ".join(self.scriptTags))
            writer.newline()
            writer.endtag('scriptTags')
            writer.newline()
        if self.pMap:
            writer.begintag('pseudoMap')
            writer.newline()
            for k, v in sorted(self.pMap.items()):
                writer.simpletag('pseudo', unicode=hex(k), pseudo=v)
                writer.newline()
            writer.endtag('pseudoMap')
            writer.newline()
        self.classes.toXML(writer, ttFont, version)
        if len(self.passes):
            writer.begintag('passes')
            writer.newline()
            for i, p in enumerate(self.passes):
                writer.begintag('pass', _index=i)
                writer.newline()
                p.toXML(writer, ttFont, version)
                writer.endtag('pass')
                writer.newline()
            writer.endtag('passes')
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont, version=2.0):
        if name == 'version':
            self.ruleVersion = float(safeEval(attrs.get('ruleVersion', "0")))
        if name == 'info':
            getSimple(self, attrs, *attrs_info)
        elif name == 'passindexes':
            getSimple(self, attrs, *attrs_passindexes)
        elif name == 'contexts':
            getSimple(self, attrs, *attrs_contexts)
        elif name == 'attributes':
            getSimple(self, attrs, *attrs_attributes)
        elif name == 'justifications':
            for element in content:
                if not isinstance(element, tuple): continue
                (tag, attrs, subcontent) = element
                if tag == 'justify':
                    j = _Object()
                    for k, v in attrs.items():
                        setattr(j, k, int(v))
                    self.jLevels.append(j)
        elif name == 'critFeatures':
            self.critFeatures = []
            element = content_string(content)
            self.critFeatures.extend(map(int, element.split()))
        elif name == 'scriptTags':
            self.scriptTags = []
            element = content_string(content)
            for n in element.split():
                self.scriptTags.append(n)
        elif name == 'pseudoMap':
            self.pMap = {}
            for element in content:
                if not isinstance(element, tuple): continue
                (tag, attrs, subcontent) = element
                if tag == 'pseudo':
                    k = int(attrs['unicode'], 16)
                    v = attrs['pseudo']
                    self.pMap[k] = v
        elif name == 'classes':
            self.classes = Classes()
            for element in content:
                if not isinstance(element, tuple): continue
                tag, attrs, subcontent = element
                self.classes.fromXML(tag, attrs, subcontent, ttFont, version)
        elif name == 'passes':
            for element in content:
                if not isinstance(element, tuple): continue
                tag, attrs, subcontent = element
                if tag == 'pass':
                    p = Pass()
                    for e in subcontent:
                        if not isinstance(e, tuple): continue
                        p.fromXML(e[0], e[1], e[2], ttFont, version)
                    self.passes.append(p)
class Classes(object):
    """The glyph-class map of a Silf subtable: `linear` classes (lists of
    glyph names) followed by `nonLinear` classes (glyph-name -> index
    dicts)."""

    def __init__(self):
        self.linear = []
        self.nonLinear = []

    def decompile(self, data, ttFont, version=2.0):
        sstruct.unpack2(Silf_classmap_format, data, self)
        if version >= 4.0:
            oClasses = struct.unpack((">%dL" % (self.numClass+1)),
                data[4:8+4*self.numClass])
        else:
            oClasses = struct.unpack((">%dH" % (self.numClass+1)),
                data[4:6+2*self.numClass])
        for s, e in zip(oClasses[:self.numLinear], oClasses[1:self.numLinear+1]):
            # BUGFIX: materialise as a list.  The original appended a
            # *generator*, which compile()/toXML() later call len() on
            # and iterate more than once — both fail on a generator.
            self.linear.append([ttFont.getGlyphName(x) for x in
                struct.unpack((">%dH" % ((e-s)//2)), data[s:e])])
        for s, e in zip(oClasses[self.numLinear:self.numClass],
                oClasses[self.numLinear+1:self.numClass+1]):
            # Each non-linear class starts with an 8-byte search header.
            nonLinids = [struct.unpack(">HH", data[x:x+4]) for x in range(s+8, e, 4)]
            nonLin = dict([(ttFont.getGlyphName(x[0]), x[1]) for x in nonLinids])
            self.nonLinear.append(nonLin)

    def compile(self, ttFont, version=2.0):
        data = b""
        oClasses = []
        # Offset of the first class body: class-map header plus the
        # (numClass + 1)-entry offset array.
        if version >= 4.0:
            offset = 8 + 4 * (len(self.linear) + len(self.nonLinear))
        else:
            offset = 6 + 2 * (len(self.linear) + len(self.nonLinear))
        for l in self.linear:
            oClasses.append(len(data) + offset)
            gs = [ttFont.getGlyphID(x) for x in l]
            data += struct.pack((">%dH" % len(l)), *gs)
        for l in self.nonLinear:
            oClasses.append(len(data) + offset)
            gs = [(ttFont.getGlyphID(x[0]), x[1]) for x in l.items()]
            data += grUtils.bininfo(len(gs))
            data += b"".join([struct.pack(">HH", *x) for x in sorted(gs)])
        oClasses.append(len(data) + offset)
        self.numClass = len(oClasses) - 1
        self.numLinear = len(self.linear)
        return sstruct.pack(Silf_classmap_format, self) + \
            struct.pack(((">%dL" if version >= 4.0 else ">%dH") % len(oClasses)),
                *oClasses) + data

    def toXML(self, writer, ttFont, version=2.0):
        writer.begintag('classes')
        writer.newline()
        writer.begintag('linearClasses')
        writer.newline()
        for i, l in enumerate(self.linear):
            writer.begintag('linear', _index=i)
            writer.newline()
            wrapline(writer, l)
            writer.endtag('linear')
            writer.newline()
        writer.endtag('linearClasses')
        writer.newline()
        writer.begintag('nonLinearClasses')
        writer.newline()
        for i, l in enumerate(self.nonLinear):
            writer.begintag('nonLinear', _index=i + self.numLinear)
            writer.newline()
            for inp, ind in l.items():
                writer.simpletag('map', glyph=inp, index=ind)
                writer.newline()
            writer.endtag('nonLinear')
            writer.newline()
        writer.endtag('nonLinearClasses')
        writer.newline()
        writer.endtag('classes')
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont, version=2.0):
        if name == 'linearClasses':
            for element in content:
                if not isinstance(element, tuple): continue
                tag, attrs, subcontent = element
                if tag == 'linear':
                    l = content_string(subcontent).split()
                    self.linear.append(l)
        elif name == 'nonLinearClasses':
            for element in content:
                if not isinstance(element, tuple): continue
                tag, attrs, subcontent = element
                if tag == 'nonLinear':
                    l = {}
                    for e in subcontent:
                        if not isinstance(e, tuple): continue
                        tag, attrs, subsubcontent = e
                        if tag == 'map':
                            l[attrs['glyph']] = int(safeEval(attrs['index']))
                    self.nonLinear.append(l)
class Pass(object):
    """One Graphite pass: a glyph->column map, an FSM (start states and
    transition rows), and per-rule sort keys, pre-contexts, constraint
    code and action code.

    The binary layout parsed/produced here is order-dependent; the
    offset arithmetic below mirrors the byte layout exactly, so the
    statement order must not be changed.
    """

    def __init__(self):
        self.colMap = {}
        self.rules = []
        self.rulePreContexts = []
        self.ruleSortKeys = []
        self.ruleConstraints = []
        self.passConstraints = b""
        self.actions = []
        self.stateTrans = []
        self.startStates = []

    def decompile(self, data, ttFont, version=2.0):
        # Fixed header, then the glyph-range -> column map.
        _, data = sstruct.unpack2(Silf_pass_format, data, self)
        (numRange, _, _, _) = struct.unpack(">4H", data[:8])
        data = data[8:]
        for i in range(numRange):
            (first, last, col) = struct.unpack(">3H", data[6*i:6*i+6])
            for g in range(first, last+1):
                self.colMap[ttFont.getGlyphName(g)] = col
        data = data[6*numRange:]
        # Rule map: numSuccess+1 offsets delimiting each state's rules.
        oRuleMap = struct.unpack_from((">%dH" % (self.numSuccess + 1)), data)
        data = data[2+2*self.numSuccess:]
        rules = struct.unpack_from((">%dH" % oRuleMap[-1]), data)
        self.rules = [rules[s:e] for (s,e) in zip(oRuleMap, oRuleMap[1:])]
        data = data[2*oRuleMap[-1]:]
        (self.minRulePreContext, self.maxRulePreContext) = struct.unpack('BB', data[:2])
        numStartStates = self.maxRulePreContext - self.minRulePreContext + 1
        self.startStates = struct.unpack((">%dH" % numStartStates),
            data[2:2 + numStartStates * 2])
        data = data[2+numStartStates*2:]
        self.ruleSortKeys = struct.unpack((">%dH" % self.numRules), data[:2 * self.numRules])
        data = data[2*self.numRules:]
        self.rulePreContexts = struct.unpack(("%dB" % self.numRules), data[:self.numRules])
        data = data[self.numRules:]
        # pConstraint is the byte length of the pass-constraint code.
        (self.collisionThreshold, pConstraint) = struct.unpack(">BH", data[:3])
        oConstraints = list(struct.unpack((">%dH" % (self.numRules + 1)),
            data[3:5 + self.numRules * 2]))
        data = data[5 + self.numRules * 2:]
        oActions = list(struct.unpack((">%dH" % (self.numRules + 1)),
            data[:2 + self.numRules * 2]))
        data = data[2 * self.numRules + 2:]
        # FSM transition rows are stored big-endian; byteswap on
        # little-endian hosts so the arrays hold native values.
        for i in range(self.numTransitional):
            a = array("H", data[i*self.numColumns*2:(i+1)*self.numColumns*2])
            if sys.byteorder != "big": a.byteswap()
            self.stateTrans.append(a)
        # The +1 skips the pad byte that compile() emits after the
        # transition rows (struct.pack("B", 0) below).
        data = data[self.numTransitional * self.numColumns * 2 + 1:]
        self.passConstraints = data[:pConstraint]
        data = data[pConstraint:]
        # Back-fill zero offsets so every (s, e) pair below is valid.
        for i in range(len(oConstraints)-2,-1,-1):
            if oConstraints[i] == 0 :
                oConstraints[i] = oConstraints[i+1]
        self.ruleConstraints = [(data[s:e] if (e-s > 1) else b"") for (s,e) in zip(oConstraints, oConstraints[1:])]
        data = data[oConstraints[-1]:]
        self.actions = [(data[s:e] if (e-s > 1) else "") for (s,e) in zip(oActions, oActions[1:])]
        data = data[oActions[-1]:]
        # not using debug

    def compile(self, ttFont, base, version=2.0):
        # build it all up backwards
        # Offsets of each action/constraint blob within the code area
        # (constraints start at 1 because of the leading pad byte).
        oActions = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.actions + [b""], (0, []))[1]
        oConstraints = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.ruleConstraints + [b""], (1, []))[1]
        constraintCode = b"\000" + b"".join(self.ruleConstraints)
        transes = []
        # Byteswap to big-endian for serialisation, then swap back so
        # the in-memory arrays are left unchanged.
        for t in self.stateTrans:
            if sys.byteorder != "big": t.byteswap()
            transes.append(t.tobytes())
            if sys.byteorder != "big": t.byteswap()
        if not len(transes):
            self.startStates = [0]
        oRuleMap = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.rules+[[]], (0, []))[1]
        passRanges = []
        # Collapse the glyph->column map into (first, last, col) ranges.
        gidcolmap = dict([(ttFont.getGlyphID(x[0]), x[1]) for x in self.colMap.items()])
        for e in grUtils.entries(gidcolmap, sameval = True):
            if e[1]:
                passRanges.append((e[0], e[0]+e[1]-1, e[2][0]))
        self.numRules = len(self.actions)
        # Absolute offsets (relative to `base`) recorded in the header.
        self.fsmOffset = (sstruct.calcsize(Silf_pass_format) + 8 + len(passRanges) * 6
            + len(oRuleMap) * 2 + 2 * oRuleMap[-1] + 2
            + 2 * len(self.startStates) + 3 * self.numRules + 3
            + 4 * self.numRules + 4)
        self.pcCode = self.fsmOffset + 2*self.numTransitional*self.numColumns + 1 + base
        self.rcCode = self.pcCode + len(self.passConstraints)
        self.aCode = self.rcCode + len(constraintCode)
        self.oDebug = 0
        # now generate output
        data = sstruct.pack(Silf_pass_format, self)
        data += grUtils.bininfo(len(passRanges), 6)
        data += b"".join(struct.pack(">3H", *p) for p in passRanges)
        data += struct.pack((">%dH" % len(oRuleMap)), *oRuleMap)
        flatrules = reduce(lambda a,x: a+x, self.rules, [])
        data += struct.pack((">%dH" % oRuleMap[-1]), *flatrules)
        data += struct.pack("BB", self.minRulePreContext, self.maxRulePreContext)
        data += struct.pack((">%dH" % len(self.startStates)), *self.startStates)
        data += struct.pack((">%dH" % self.numRules), *self.ruleSortKeys)
        data += struct.pack(("%dB" % self.numRules), *self.rulePreContexts)
        data += struct.pack(">BH", self.collisionThreshold, len(self.passConstraints))
        data += struct.pack((">%dH" % (self.numRules+1)), *oConstraints)
        data += struct.pack((">%dH" % (self.numRules+1)), *oActions)
        return data + b"".join(transes) + struct.pack("B", 0) + \
            self.passConstraints + constraintCode + b"".join(self.actions)

    def toXML(self, writer, ttFont, version=2.0):
        writesimple('info', self, writer, *pass_attrs_info)
        writesimple('fsminfo', self, writer, *pass_attrs_fsm)
        writer.begintag('colmap')
        writer.newline()
        wrapline(writer, ["{}={}".format(*x) for x in sorted(self.colMap.items(),
            key=lambda x:ttFont.getGlyphID(x[0]))])
        writer.endtag('colmap')
        writer.newline()
        writer.begintag('staterulemap')
        writer.newline()
        for i, r in enumerate(self.rules):
            # Success states occupy the last numSuccess rows of the FSM.
            writer.simpletag('state', number = self.numRows - self.numSuccess + i,
                rules = " ".join(map(str, r)))
            writer.newline()
        writer.endtag('staterulemap')
        writer.newline()
        writer.begintag('rules')
        writer.newline()
        for i in range(len(self.actions)):
            writer.begintag('rule', index=i, precontext=self.rulePreContexts[i],
                sortkey=self.ruleSortKeys[i])
            writer.newline()
            if len(self.ruleConstraints[i]):
                writecode('constraint', writer, self.ruleConstraints[i])
            writecode('action', writer, self.actions[i])
            writer.endtag('rule')
            writer.newline()
        writer.endtag('rules')
        writer.newline()
        if len(self.passConstraints):
            writecode('passConstraint', writer, self.passConstraints)
        if len(self.stateTrans):
            writer.begintag('fsm')
            writer.newline()
            writer.begintag('starts')
            writer.write(" ".join(map(str, self.startStates)))
            writer.endtag('starts')
            writer.newline()
            for i, s in enumerate(self.stateTrans):
                writer.begintag('row', _i=i)
                # no newlines here
                writer.write(" ".join(map(str, s)))
                writer.endtag('row')
                writer.newline()
            writer.endtag('fsm')
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont, version=2.0):
        if name == 'info':
            getSimple(self, attrs, *pass_attrs_info)
        elif name == 'fsminfo':
            getSimple(self, attrs, *pass_attrs_fsm)
        elif name == 'colmap':
            e = content_string(content)
            for w in e.split():
                x = w.split('=')
                if len(x) != 2 or x[0] == '' or x[1] == '': continue
                self.colMap[x[0]] = int(x[1])
        elif name == 'staterulemap':
            for e in content:
                if not isinstance(e, tuple): continue
                tag, a, c = e
                if tag == 'state':
                    self.rules.append([int(x) for x in a['rules'].split(" ")])
        elif name == 'rules':
            for element in content:
                if not isinstance(element, tuple): continue
                tag, a, c = element
                if tag != 'rule': continue
                self.rulePreContexts.append(int(a['precontext']))
                self.ruleSortKeys.append(int(a['sortkey']))
                con = b""
                act = b""
                for e in c:
                    if not isinstance(e, tuple): continue
                    tag, a, subc = e
                    if tag == 'constraint':
                        con = readcode(subc)
                    elif tag == 'action':
                        act = readcode(subc)
                self.actions.append(act)
                self.ruleConstraints.append(con)
        elif name == 'passConstraint':
            self.passConstraints = readcode(content)
        elif name == 'fsm':
            for element in content:
                if not isinstance(element, tuple): continue
                tag, a, c = element
                if tag == 'row':
                    s = array('H')
                    e = content_string(c)
                    s.extend(map(int, e.split()))
                    self.stateTrans.append(s)
                elif tag == 'starts':
                    s = []
                    e = content_string(c)
                    s.extend(map(int, e.split()))
                    self.startStates = s
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S__i_l_f.py",
"copies": "5",
"size": "33326",
"license": "apache-2.0",
"hash": 2288808698555330600,
"line_mean": 36.6139954853,
"line_max": 126,
"alpha_frac": 0.5169237232,
"autogenerated": false,
"ratio": 3.6035899653979238,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6620513688597924,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import byteord
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from . import DefaultTable
import pdb
import struct
METAHeaderFormat = """
> # big endian
tableVersionMajor: H
tableVersionMinor: H
metaEntriesVersionMajor: H
metaEntriesVersionMinor: H
unicodeVersion: L
metaFlags: H
nMetaRecs: H
"""
# This record is followed by nMetaRecs of METAGlyphRecordFormat.
# This in turn is followd by as many METAStringRecordFormat entries
# as specified by the METAGlyphRecordFormat entries
# this is followed by the strings specifried in the METAStringRecordFormat
METAGlyphRecordFormat = """
> # big endian
glyphID: H
nMetaEntry: H
"""
# This record is followd by a variable data length field:
# USHORT or ULONG hdrOffset
# Offset from start of META table to the beginning
# of this glyphs array of ns Metadata string entries.
# Size determined by metaFlags field
# METAGlyphRecordFormat entries must be sorted by glyph ID
METAStringRecordFormat = """
> # big endian
labelID: H
stringLen: H
"""
# This record is followd by a variable data length field:
# USHORT or ULONG stringOffset
# METAStringRecordFormat entries must be sorted in order of labelID
# There may be more than one entry with the same labelID
# There may be more than one strign with the same content.
# Strings shall be Unicode UTF-8 encoded, and null-terminated.
# Known labelID values and their human-readable names.
METALabelDict = {
    0: "MojikumiX4051",  # An integer in the range 1-20
    1: "UNIUnifiedBaseChars",
    2: "BaseFontName",
    3: "Language",
    4: "CreationDate",
    5: "FoundryName",
    6: "FoundryCopyright",
    7: "OwnerURI",
    8: "WritingScript",
    10: "StrokeCount",
    11: "IndexingRadical",
}
def getLabelString(labelID):
    """Return the human-readable name for *labelID*, or "Unknown label"
    for IDs not present in METALabelDict."""
    return str(METALabelDict.get(labelID, "Unknown label"))
class table_M_E_T_A_(DefaultTable.DefaultTable):
    """META table: per-glyph lists of labelled metadata strings.

    metaFlags bit 0 selects the width of all offset fields:
    0 -> USHORT offsets, 1 -> ULONG offsets.
    """

    dependencies = []

    def decompile(self, data, ttFont):
        """Parse header, glyph records (each with a trailing offset of a
        width chosen by metaFlags), then each glyph's string records."""
        dummy, newData = sstruct.unpack2(METAHeaderFormat, data, self)
        self.glyphRecords = []
        for i in range(self.nMetaRecs):
            glyphRecord, newData = sstruct.unpack2(METAGlyphRecordFormat, newData, GlyphRecord())
            if self.metaFlags == 0:
                [glyphRecord.offset] = struct.unpack(">H", newData[:2])
                newData = newData[2:]
            elif self.metaFlags == 1:
                # BUGFIX: a 4-byte offset must be unpacked with ">L";
                # the original used ">H" on a 4-byte slice, which raises
                # struct.error (and compile() writes ">L" here).
                [glyphRecord.offset] = struct.unpack(">L", newData[:4])
                newData = newData[4:]
            else:
                assert 0, "The metaFlags field in the META table header has a value other than 0 or 1 :" + str(self.metaFlags)
            glyphRecord.stringRecs = []
            newData = data[glyphRecord.offset:]
            for j in range(glyphRecord.nMetaEntry):
                stringRec, newData = sstruct.unpack2(METAStringRecordFormat, newData, StringRecord())
                if self.metaFlags == 0:
                    [stringRec.offset] = struct.unpack(">H", newData[:2])
                    newData = newData[2:]
                else:
                    # BUGFIX: same ">H"-on-4-bytes error as above.
                    [stringRec.offset] = struct.unpack(">L", newData[:4])
                    newData = newData[4:]
                stringRec.string = data[stringRec.offset:stringRec.offset + stringRec.stringLen]
                glyphRecord.stringRecs.append(stringRec)
            self.glyphRecords.append(glyphRecord)

    def compile(self, ttFont):
        """Serialise the table, retrying with wider (or narrower) offset
        fields when an offset crosses (or falls back under) 65535."""
        offsetOK = 0
        self.nMetaRecs = len(self.glyphRecords)
        count = 0
        while (offsetOK != 1):
            count = count + 1
            if count > 4:
                # Should converge within a couple of retries; a longer
                # loop indicates an internal inconsistency.
                pdb.set_trace()
            metaData = sstruct.pack(METAHeaderFormat, self)
            stringRecsOffset = len(metaData) + self.nMetaRecs * (6 + 2*(self.metaFlags & 1))
            stringRecSize = (6 + 2*(self.metaFlags & 1))
            for glyphRec in self.glyphRecords:
                glyphRec.offset = stringRecsOffset
                if (glyphRec.offset > 65535) and ((self.metaFlags & 1) == 0):
                    # Offsets no longer fit in a USHORT: switch to ULONG
                    # offsets and restart the whole layout.
                    self.metaFlags = self.metaFlags + 1
                    offsetOK = -1
                    break
                metaData = metaData + glyphRec.compile(self)
                stringRecsOffset = stringRecsOffset + (glyphRec.nMetaEntry * stringRecSize)
                # this will be the String Record offset for the next GlyphRecord.
            if offsetOK == -1:
                offsetOK = 0
                continue
            # metaData now contains the header and all of the GlyphRecords. Its length should be
            # the offset to the first StringRecord.
            stringOffset = stringRecsOffset
            for glyphRec in self.glyphRecords:
                assert (glyphRec.offset == len(metaData)), "Glyph record offset did not compile correctly! for rec:" + str(glyphRec)
                for stringRec in glyphRec.stringRecs:
                    stringRec.offset = stringOffset
                    if (stringRec.offset > 65535) and ((self.metaFlags & 1) == 0):
                        self.metaFlags = self.metaFlags + 1
                        offsetOK = -1
                        break
                    metaData = metaData + stringRec.compile(self)
                    stringOffset = stringOffset + stringRec.stringLen
                if offsetOK == -1:
                    # BUGFIX: propagate the overflow break out of the
                    # glyph-record loop as well; previously the outer
                    # loop kept going and the offset assert above fired
                    # before the retry could happen.
                    break
            if offsetOK == -1:
                offsetOK = 0
                continue
            if ((self.metaFlags & 1) == 1) and (stringOffset < 65536):
                # Everything fits in USHORT offsets after all: shrink
                # back and re-lay-out once more.
                self.metaFlags = self.metaFlags - 1
                continue
            else:
                offsetOK = 1

        # metaData now contains the header and all of the GlyphRecords and all of the String Records.
        # Its length should be the offset to the first string datum.
        for glyphRec in self.glyphRecords:
            for stringRec in glyphRec.stringRecs:
                assert (stringRec.offset == len(metaData)), "String offset did not compile correctly! for string:" + str(stringRec.string)
                metaData = metaData + stringRec.string
        return metaData

    def toXML(self, writer, ttFont):
        writer.comment("Lengths and number of entries in this table will be recalculated by the compiler")
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(METAHeaderFormat)
        for name in names:
            value = getattr(self, name)
            writer.simpletag(name, value=value)
            writer.newline()
        for glyphRec in self.glyphRecords:
            glyphRec.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        if name == "GlyphRecord":
            if not hasattr(self, "glyphRecords"):
                self.glyphRecords = []
            glyphRec = GlyphRecord()
            self.glyphRecords.append(glyphRec)
            for element in content:
                if isinstance(element, str):
                    continue
                name, attrs, content = element
                glyphRec.fromXML(name, attrs, content, ttFont)
            glyphRec.offset = -1
            glyphRec.nMetaEntry = len(glyphRec.stringRecs)
        else:
            setattr(self, name, safeEval(attrs["value"]))
class GlyphRecord(object):
    # One METAGlyphRecordFormat entry plus its decoded string records.

    def __init__(self):
        self.glyphID = -1
        self.nMetaEntry = -1
        self.offset = -1
        self.stringRecs = []

    def toXML(self, writer, ttFont):
        writer.begintag("GlyphRecord")
        writer.newline()
        writer.simpletag("glyphID", value=self.glyphID)
        writer.newline()
        writer.simpletag("nMetaEntry", value=self.nMetaEntry)
        writer.newline()
        for stringRec in self.stringRecs:
            stringRec.toXML(writer, ttFont)
        writer.endtag("GlyphRecord")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "StringRecord":
            stringRec = StringRecord()
            self.stringRecs.append(stringRec)
            # NOTE(review): each non-string element triggers a full
            # re-parse of *content* via StringRecord.fromXML (which
            # itself iterates the whole content list and ignores the
            # name/attrs passed here).  The repeated calls appear
            # redundant but idempotent — confirm before simplifying to
            # a single call.
            for element in content:
                if isinstance(element, str):
                    continue
                stringRec.fromXML(name, attrs, content, ttFont)
            stringRec.stringLen = len(stringRec.string)
        else:
            setattr(self, name, safeEval(attrs["value"]))

    def compile(self, parentTable):
        # Trailing offset field width is selected by the parent table's
        # metaFlags: USHORT when 0, ULONG when 1.
        data = sstruct.pack(METAGlyphRecordFormat, self)
        if parentTable.metaFlags == 0:
            datum = struct.pack(">H", self.offset)
        elif parentTable.metaFlags == 1:
            datum = struct.pack(">L", self.offset)
        data = data + datum
        return data

    def __repr__(self):
        return "GlyphRecord[ glyphID: " + str(self.glyphID) + ", nMetaEntry: " + str(self.nMetaEntry) + ", offset: " + str(self.offset) + " ]"
# XXX The following two functions are really broken around UTF-8 vs Unicode
def mapXMLToUTF8(string):
    """Convert an XML attribute value containing numeric character
    references ("&#x41;" or the double-escaped "&amp;#x41;") into a
    UTF-8 encoded byte string.  Literal characters pass through.
    """
    uString = str()
    strLen = len(string)
    i = 0
    while i < strLen:
        prefixLen = 0
        if string[i:i+3] == "&#x":
            prefixLen = 3
        elif string[i:i+7] == "&amp;#x":
            # BUGFIX: this literal had degraded to the 3-character
            # "&#x", which a 7-character slice can never equal, making
            # the double-escaped branch unreachable.
            prefixLen = 7
        if prefixLen:
            i = i + prefixLen
            j = i
            while string[i] != ";":
                i = i + 1
            valStr = string[j:i]
            # int(..., 16) instead of eval('0x'+...): same result for
            # valid hex, no code execution on malformed/hostile input.
            uString = uString + chr(int(valStr, 16))
        else:
            uString = uString + chr(byteord(string[i]))
        i = i + 1
    return uString.encode('utf_8')
def mapUTF8toXML(string):
    """Decode the UTF-8 byte string *string* and re-encode every
    character outside printable ASCII (codes 0x20..0x7F exclusive of
    0x7F's upper bound check, i.e. 0x20-0x7E plus none above) as a
    "&#x..;" numeric reference."""
    chunks = []
    for ch in string.decode('utf_8'):
        code = ord(ch)
        if 0x1F < code < 0x80:
            chunks.append(ch)
        else:
            chunks.append("&#x" + hex(code)[2:] + ";")
    return "".join(chunks)
class StringRecord(object):
    """One METAStringRecordFormat entry: a labelID plus its (UTF-8
    bytes) string payload."""

    def toXML(self, writer, ttFont):
        writer.begintag("StringRecord")
        writer.newline()
        writer.simpletag("labelID", value=self.labelID)
        writer.comment(getLabelString(self.labelID))
        writer.newline()
        writer.newline()
        writer.simpletag("string", value=mapUTF8toXML(self.string))
        writer.newline()
        writer.endtag("StringRecord")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        for element in content:
            if isinstance(element, str):
                continue
            name, attrs, content = element
            value = attrs["value"]
            if name == "string":
                self.string = mapXMLToUTF8(value)
            else:
                setattr(self, name, safeEval(value))

    def compile(self, parentTable):
        # Trailing offset field width is selected by the parent table's
        # metaFlags: USHORT when 0, ULONG when 1.
        data = sstruct.pack(METAStringRecordFormat, self)
        if parentTable.metaFlags == 0:
            datum = struct.pack(">H", self.offset)
        elif parentTable.metaFlags == 1:
            datum = struct.pack(">L", self.offset)
        data = data + datum
        return data

    def __repr__(self):
        # BUGFIX: self.string is a bytes object; wrap it in str() so the
        # concatenation does not raise TypeError under Python 3.
        return "StringRecord [ labelID: " + str(self.labelID) + " aka " + getLabelString(self.labelID) \
            + ", offset: " + str(self.offset) + ", length: " + str(self.stringLen) + ", string: " + str(self.string) + " ]"
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/M_E_T_A_.py",
"copies": "5",
"size": "9393",
"license": "apache-2.0",
"hash": -5164312249272677000,
"line_mean": 29.8980263158,
"line_max": 136,
"alpha_frac": 0.6930693069,
"autogenerated": false,
"ratio": 3.079672131147541,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03314532345348272,
"num_lines": 304
} |
from fontTools.misc.py23 import byteord, tobytes
from fontTools.feaLib.error import FeatureLibError
from fontTools.feaLib.location import FeatureLibLocation
from fontTools.misc.encodingTools import getEncoding
from collections import OrderedDict
import itertools
# One level of indentation used when serializing AST nodes back to feature-file text.
SHIFT = " " * 4
# Public API of this module: the AST node classes and helpers exported for the
# feature-file parser and builder.
__all__ = [
    "Element",
    "FeatureFile",
    "Comment",
    "GlyphName",
    "GlyphClass",
    "GlyphClassName",
    "MarkClassName",
    "AnonymousBlock",
    "Block",
    "FeatureBlock",
    "NestedBlock",
    "LookupBlock",
    "GlyphClassDefinition",
    "GlyphClassDefStatement",
    "MarkClass",
    "MarkClassDefinition",
    "AlternateSubstStatement",
    "Anchor",
    "AnchorDefinition",
    "AttachStatement",
    "AxisValueLocationStatement",
    "BaseAxis",
    "CVParametersNameStatement",
    "ChainContextPosStatement",
    "ChainContextSubstStatement",
    "CharacterStatement",
    "CursivePosStatement",
    "ElidedFallbackName",
    "ElidedFallbackNameID",
    "Expression",
    "FeatureNameStatement",
    "FeatureReferenceStatement",
    "FontRevisionStatement",
    "HheaField",
    "IgnorePosStatement",
    "IgnoreSubstStatement",
    "IncludeStatement",
    "LanguageStatement",
    "LanguageSystemStatement",
    "LigatureCaretByIndexStatement",
    "LigatureCaretByPosStatement",
    "LigatureSubstStatement",
    "LookupFlagStatement",
    "LookupReferenceStatement",
    "MarkBasePosStatement",
    "MarkLigPosStatement",
    "MarkMarkPosStatement",
    "MultipleSubstStatement",
    "NameRecord",
    "OS2Field",
    "PairPosStatement",
    "ReverseChainSingleSubstStatement",
    "ScriptStatement",
    "SinglePosStatement",
    "SingleSubstStatement",
    "SizeParameters",
    "Statement",
    "STATAxisValueStatement",
    "STATDesignAxisStatement",
    "STATNameStatement",
    "SubtableStatement",
    "TableBlock",
    "ValueRecord",
    "ValueRecordDefinition",
    "VheaField",
]
def deviceToString(device):
    """Serialize a device table (an iterable of (size, value) pairs) to
    feature-file syntax; ``None`` becomes the NULL device."""
    if device is None:
        return "<device NULL>"
    pairs = ("%d %d" % pair for pair in device)
    return "<device %s>" % ", ".join(pairs)
# Reserved words of the feature-file language. Glyph names that collide with
# one of these (case-insensitively) must be escaped with a leading backslash.
fea_keywords = {
    "anchor",
    "anchordef",
    "anon",
    "anonymous",
    "by",
    "contour",
    "cursive",
    "device",
    "enum",
    "enumerate",
    "excludedflt",
    "exclude_dflt",
    "feature",
    "from",
    "ignore",
    "ignorebaseglyphs",
    "ignoreligatures",
    "ignoremarks",
    "include",
    "includedflt",
    "include_dflt",
    "language",
    "languagesystem",
    "lookup",
    "lookupflag",
    "mark",
    "markattachmenttype",
    "markclass",
    "nameid",
    "null",
    "parameters",
    "pos",
    "position",
    "required",
    "righttoleft",
    "reversesub",
    "rsub",
    "script",
    "sub",
    "substitute",
    "subtable",
    "table",
    "usemarkfilteringset",
    "useextension",
    "valuerecorddef",
    "base",
    "gdef",
    "head",
    "hhea",
    "name",
    "vhea",
    "vmtx",
}
def asFea(g):
    """Render *g* as feature-file text.
    Objects exposing an ``asFea`` method render themselves; a 2-tuple is
    treated as a glyph range; a bare string is backslash-escaped when it
    collides with a feature-file keyword.
    """
    if hasattr(g, "asFea"):
        return g.asFea()
    if isinstance(g, tuple) and len(g) == 2:
        # a glyph range
        return "{} - {}".format(asFea(g[0]), asFea(g[1]))
    if g.lower() in fea_keywords:
        return "\\" + g
    return g
class Element(object):
    """A base class representing "something" in a feature file."""
    def __init__(self, location=None):
        # Normalize tuple-style locations into FeatureLibLocation objects.
        if location and not isinstance(location, FeatureLibLocation):
            location = FeatureLibLocation(*location)
        #: location of this element as a `FeatureLibLocation` object.
        self.location = location
    def build(self, builder):
        """Default build action: nothing to do."""
        pass
    def asFea(self, indent=""):
        """Returns this element as a string of feature code. For block-type
        elements (such as :class:`FeatureBlock`), the `indent` string is
        added to the start of each line in the output."""
        raise NotImplementedError
    def __str__(self):
        return self.asFea()
class Statement(Element):
    """Base class for AST nodes that represent statements in a feature file."""
    pass
class Expression(Element):
    """Base class for AST nodes that represent expressions (e.g. glyph sets)."""
    pass
class Comment(Element):
    """A comment in a feature file."""
    def __init__(self, text, location=None):
        super(Comment, self).__init__(location)
        self.text = text  #: Text of the comment
    def asFea(self, indent=""):
        # Comments are reproduced verbatim; the indent argument is unused.
        return self.text
class NullGlyph(Expression):
    """The NULL glyph, used in glyph deletion substitutions."""
    def __init__(self, location=None):
        Expression.__init__(self, location)
    def glyphSet(self):
        """The glyphs in this class as a tuple of :class:`GlyphName` objects."""
        # The NULL glyph never contributes any glyphs.
        return tuple()
    def asFea(self, indent=""):
        return "NULL"
class GlyphName(Expression):
    """A single glyph name, such as ``cedilla``."""
    def __init__(self, glyph, location=None):
        Expression.__init__(self, location)
        #: The name itself as a string
        self.glyph = glyph
    def glyphSet(self):
        """The glyphs in this class as a tuple of :class:`GlyphName` objects."""
        return tuple([self.glyph])
    def asFea(self, indent=""):
        return asFea(self.glyph)
class GlyphClass(Expression):
    """A glyph class, such as ``[acute cedilla grave]``."""
    def __init__(self, glyphs=None, location=None):
        Expression.__init__(self, location)
        #: The list of glyphs in this class, as :class:`GlyphName` objects.
        self.glyphs = glyphs if glyphs is not None else []
        # "Source" form of the class: plain glyphs plus compact markers
        # (range tuples, class references) appended by the add_* methods.
        # asFea() prefers this form so ranges like A-Z round-trip compactly.
        self.original = []
        # Index of the first entry in self.glyphs not yet mirrored into
        # self.original by one of the add_* methods.
        self.curr = 0
    def glyphSet(self):
        """The glyphs in this class as a tuple of :class:`GlyphName` objects."""
        return tuple(self.glyphs)
    def asFea(self, indent=""):
        if len(self.original):
            # Flush any plain glyphs appended since the last add_* call so
            # that self.original fully describes the class before writing.
            if self.curr < len(self.glyphs):
                self.original.extend(self.glyphs[self.curr :])
                self.curr = len(self.glyphs)
            return "[" + " ".join(map(asFea, self.original)) + "]"
        else:
            return "[" + " ".join(map(asFea, self.glyphs)) + "]"
    def extend(self, glyphs):
        """Add a list of :class:`GlyphName` objects to the class."""
        self.glyphs.extend(glyphs)
    def append(self, glyph):
        """Add a single :class:`GlyphName` object to the class."""
        self.glyphs.append(glyph)
    def add_range(self, start, end, glyphs):
        """Add a range (e.g. ``A-Z``) to the class. ``start`` and ``end``
        are either :class:`GlyphName` objects or strings representing the
        start and end glyphs in the class, and ``glyphs`` is the full list of
        :class:`GlyphName` objects in the range."""
        # Mirror pending plain glyphs first, then record the range compactly.
        if self.curr < len(self.glyphs):
            self.original.extend(self.glyphs[self.curr :])
        self.original.append((start, end))
        self.glyphs.extend(glyphs)
        self.curr = len(self.glyphs)
    def add_cid_range(self, start, end, glyphs):
        """Add a range to the class by glyph ID. ``start`` and ``end`` are the
        initial and final IDs, and ``glyphs`` is the full list of
        :class:`GlyphName` objects in the range."""
        # CID ranges are written with backslash-escaped numeric IDs.
        if self.curr < len(self.glyphs):
            self.original.extend(self.glyphs[self.curr :])
        self.original.append(("\\{}".format(start), "\\{}".format(end)))
        self.glyphs.extend(glyphs)
        self.curr = len(self.glyphs)
    def add_class(self, gc):
        """Add glyphs from the given :class:`GlyphClassName` object to the
        class."""
        if self.curr < len(self.glyphs):
            self.original.extend(self.glyphs[self.curr :])
        self.original.append(gc)
        self.glyphs.extend(gc.glyphSet())
        self.curr = len(self.glyphs)
class GlyphClassName(Expression):
    """A glyph class name, such as ``@FRENCH_MARKS``. This must be instantiated
    with a :class:`GlyphClassDefinition` object."""
    def __init__(self, glyphclass, location=None):
        Expression.__init__(self, location)
        assert isinstance(glyphclass, GlyphClassDefinition)
        self.glyphclass = glyphclass
    def glyphSet(self):
        """The glyphs in this class as a tuple of :class:`GlyphName` objects."""
        return tuple(self.glyphclass.glyphSet())
    def asFea(self, indent=""):
        return "@{}".format(self.glyphclass.name)
class MarkClassName(Expression):
    """A mark class name, such as ``@FRENCH_MARKS`` defined with ``markClass``.
    This must be instantiated with a :class:`MarkClass` object."""
    def __init__(self, markClass, location=None):
        Expression.__init__(self, location)
        assert isinstance(markClass, MarkClass)
        self.markClass = markClass
    def glyphSet(self):
        """The glyphs in this class as a tuple of :class:`GlyphName` objects."""
        return self.markClass.glyphSet()
    def asFea(self, indent=""):
        return "@{}".format(self.markClass.name)
class AnonymousBlock(Statement):
    """An anonymous data block."""
    def __init__(self, tag, content, location=None):
        Statement.__init__(self, location)
        self.tag = tag  #: string containing the block's "tag"
        self.content = content  #: block data as string
    def asFea(self, indent=""):
        # The tag is repeated on the closing brace, per feature-file syntax.
        parts = [
            "anon {} {{\n".format(self.tag),
            self.content,
            "}} {};\n\n".format(self.tag),
        ]
        return "".join(parts)
class Block(Statement):
    """A block of statements: feature, lookup, etc."""
    def __init__(self, location=None):
        Statement.__init__(self, location)
        self.statements = []  #: Statements contained in the block
    def build(self, builder):
        """When handed a 'builder' object of comparable interface to
        :class:`fontTools.feaLib.builder`, walks the statements in this
        block, calling the builder callbacks."""
        for statement in self.statements:
            statement.build(builder)
    def asFea(self, indent=""):
        # Each nested statement is indented one extra level.
        indent = indent + SHIFT
        body = ("\n" + indent).join(s.asFea(indent=indent) for s in self.statements)
        return indent + body + "\n"
class FeatureFile(Block):
    """The top-level element of the syntax tree, containing the whole feature
    file in its ``statements`` attribute."""
    def __init__(self):
        Block.__init__(self, location=None)
        # Maps mark class names to their ast.MarkClass objects.
        self.markClasses = {}
    def asFea(self, indent=""):
        # The top level is not indented; statements are joined by newlines.
        return "\n".join([s.asFea(indent=indent) for s in self.statements])
class FeatureBlock(Block):
    """A named feature block."""
    def __init__(self, name, use_extension=False, location=None):
        Block.__init__(self, location)
        self.name = name
        self.use_extension = use_extension
    def build(self, builder):
        """Call the ``start_feature`` callback on the builder object, visit
        all the statements in this feature, and then call ``end_feature``."""
        # TODO(sascha): Handle use_extension.
        builder.start_feature(self.location, self.name)
        # language exclude_dflt statements modify builder.features_
        # limit them to this block with temporary builder.features_
        outerFeatures = builder.features_
        builder.features_ = {}
        Block.build(self, builder)
        for key, value in builder.features_.items():
            outerFeatures.setdefault(key, []).extend(value)
        builder.features_ = outerFeatures
        builder.end_feature()
    def asFea(self, indent=""):
        name = self.name.strip()
        res = indent + "feature %s " % name
        if self.use_extension:
            res += "useExtension "
        res += "{\n"
        res += Block.asFea(self, indent=indent)
        res += indent + "} %s;\n" % name
        return res
class NestedBlock(Block):
    """A block inside another block, for example when found inside a
    ``cvParameters`` block."""
    def __init__(self, tag, block_name, location=None):
        Block.__init__(self, location)
        self.tag = tag
        self.block_name = block_name
    def build(self, builder):
        Block.build(self, builder)
        # Named-parameter UI labels count toward the cv feature's
        # named-parameter total.
        if self.block_name == "ParamUILabelNameID":
            builder.add_to_cv_num_named_params(self.tag)
    def asFea(self, indent=""):
        parts = [
            "{}{} {{\n".format(indent, self.block_name),
            Block.asFea(self, indent=indent),
            "{}}};\n".format(indent),
        ]
        return "".join(parts)
class LookupBlock(Block):
    """A named lookup, containing ``statements``."""
    def __init__(self, name, use_extension=False, location=None):
        Block.__init__(self, location)
        self.name = name
        self.use_extension = use_extension
    def build(self, builder):
        # TODO(sascha): Handle use_extension.
        builder.start_lookup_block(self.location, self.name)
        Block.build(self, builder)
        builder.end_lookup_block()
    def asFea(self, indent=""):
        header = "lookup {} ".format(self.name)
        if self.use_extension:
            header += "useExtension "
        body = Block.asFea(self, indent=indent)
        return header + "{\n" + body + "{}}} {};\n".format(indent, self.name)
class TableBlock(Block):
    """A ``table ... { }`` block."""
    def __init__(self, name, location=None):
        Block.__init__(self, location)
        self.name = name
    def asFea(self, indent=""):
        # The table tag is repeated on the closing brace.
        tag = self.name.strip()
        return (
            "table {} {{\n".format(tag)
            + super(TableBlock, self).asFea(indent=indent)
            + "}} {};\n".format(tag)
        )
class GlyphClassDefinition(Statement):
    """Example: ``@UPPERCASE = [A-Z];``."""
    def __init__(self, name, glyphs, location=None):
        Statement.__init__(self, location)
        self.name = name  #: class name as a string, without initial ``@``
        self.glyphs = glyphs  #: a :class:`GlyphClass` object
    def glyphSet(self):
        """The glyphs in this class as a tuple of :class:`GlyphName` objects."""
        return tuple(self.glyphs.glyphSet())
    def asFea(self, indent=""):
        return "@{} = {};".format(self.name, self.glyphs.asFea())
class GlyphClassDefStatement(Statement):
    """Example: ``GlyphClassDef @UPPERCASE, [B], [C], [D];``. The parameters
    must be either :class:`GlyphClass` or :class:`GlyphClassName` objects, or
    ``None``."""
    def __init__(
        self, baseGlyphs, markGlyphs, ligatureGlyphs, componentGlyphs, location=None
    ):
        Statement.__init__(self, location)
        self.baseGlyphs = baseGlyphs
        self.markGlyphs = markGlyphs
        self.ligatureGlyphs = ligatureGlyphs
        self.componentGlyphs = componentGlyphs
    def build(self, builder):
        """Calls the builder's ``add_glyphClassDef`` callback."""
        def _glyphs(expr):
            # None stands for an empty glyph set in that category.
            return expr.glyphSet() if expr else tuple()
        builder.add_glyphClassDef(
            self.location,
            _glyphs(self.baseGlyphs),
            _glyphs(self.ligatureGlyphs),
            _glyphs(self.markGlyphs),
            _glyphs(self.componentGlyphs),
        )
    def asFea(self, indent=""):
        def _fea(expr):
            return expr.asFea() if expr else ""
        # Written in the order base, ligature, mark, component.
        return "GlyphClassDef {}, {}, {}, {};".format(
            _fea(self.baseGlyphs),
            _fea(self.ligatureGlyphs),
            _fea(self.markGlyphs),
            _fea(self.componentGlyphs),
        )
class MarkClass(object):
    """One `or more` ``markClass`` statements for the same mark class.
    While glyph classes can be defined only once, the feature file format
    allows expanding mark classes with multiple definitions, each using
    different glyphs and anchors. The following are two ``MarkClassDefinitions``
    for the same ``MarkClass``::
        markClass [acute grave] <anchor 350 800> @FRENCH_ACCENTS;
        markClass [cedilla] <anchor 350 -200> @FRENCH_ACCENTS;
    The ``MarkClass`` object is therefore just a container for a list of
    :class:`MarkClassDefinition` statements.
    """
    def __init__(self, name):
        self.name = name
        self.definitions = []
        # Maps each glyph to the MarkClassDefinition that introduced it.
        self.glyphs = OrderedDict()
    def addDefinition(self, definition):
        """Add a :class:`MarkClassDefinition` statement to this mark class."""
        assert isinstance(definition, MarkClassDefinition)
        self.definitions.append(definition)
        for glyph in definition.glyphSet():
            # A glyph may appear in at most one definition of the class.
            if glyph in self.glyphs:
                otherLoc = self.glyphs[glyph].location
                suffix = "" if otherLoc is None else f" at {otherLoc}"
                raise FeatureLibError(
                    "Glyph %s already defined%s" % (glyph, suffix),
                    definition.location,
                )
            self.glyphs[glyph] = definition
    def glyphSet(self):
        """The glyphs in this class as a tuple of :class:`GlyphName` objects."""
        return tuple(self.glyphs.keys())
    def asFea(self, indent=""):
        return "\n".join(definition.asFea() for definition in self.definitions)
class MarkClassDefinition(Statement):
    """A single ``markClass`` statement. The ``markClass`` should be a
    :class:`MarkClass` object, the ``anchor`` an :class:`Anchor` object,
    and the ``glyphs`` parameter should be a `glyph-containing object`_ .
    Example:
    .. code:: python
        mc = MarkClass("FRENCH_ACCENTS")
        mc.addDefinition( MarkClassDefinition(mc, Anchor(350, 800),
            GlyphClass([ GlyphName("acute"), GlyphName("grave") ])
        ) )
        mc.addDefinition( MarkClassDefinition(mc, Anchor(350, -200),
            GlyphClass([ GlyphName("cedilla") ])
        ) )
        mc.asFea()
        # markClass [acute grave] <anchor 350 800> @FRENCH_ACCENTS;
        # markClass [cedilla] <anchor 350 -200> @FRENCH_ACCENTS;
    """
    def __init__(self, markClass, anchor, glyphs, location=None):
        Statement.__init__(self, location)
        assert isinstance(markClass, MarkClass)
        assert isinstance(anchor, Anchor) and isinstance(glyphs, Expression)
        self.markClass = markClass
        self.anchor = anchor
        self.glyphs = glyphs
    def glyphSet(self):
        """The glyphs in this class as a tuple of :class:`GlyphName` objects."""
        return self.glyphs.glyphSet()
    def asFea(self, indent=""):
        return "markClass {} {} @{};".format(
            self.glyphs.asFea(), self.anchor.asFea(), self.markClass.name
        )
class AlternateSubstStatement(Statement):
    """A ``sub ... from ...`` statement.
    ``prefix``, ``glyph``, ``suffix`` and ``replacement`` should be lists of
    `glyph-containing objects`_. ``glyph`` should be a `one element list`."""
    def __init__(self, prefix, glyph, suffix, replacement, location=None):
        Statement.__init__(self, location)
        self.prefix = prefix
        self.glyph = glyph
        self.suffix = suffix
        self.replacement = replacement
    def build(self, builder):
        """Calls the builder's ``add_alternate_subst`` callback."""
        glyphs = self.glyph.glyphSet()
        assert len(glyphs) == 1, glyphs
        glyph = list(glyphs)[0]
        prefix = [p.glyphSet() for p in self.prefix]
        suffix = [s.glyphSet() for s in self.suffix]
        replacement = self.replacement.glyphSet()
        builder.add_alternate_subst(self.location, prefix, glyph, suffix, replacement)
    def asFea(self, indent=""):
        res = "sub "
        if len(self.prefix) or len(self.suffix):
            if len(self.prefix):
                res += " ".join(map(asFea, self.prefix)) + " "
            res += asFea(self.glyph) + "'"  # even though we really only use 1
            if len(self.suffix):
                res += " " + " ".join(map(asFea, self.suffix))
        else:
            res += asFea(self.glyph)
        res += " from "
        res += asFea(self.replacement)
        res += ";"
        return res
class Anchor(Expression):
    """An ``Anchor`` element, used inside a ``pos`` rule.
    If a ``name`` is given, this will be used in preference to the coordinates.
    Other values should be integer.
    """
    def __init__(
        self,
        x,
        y,
        name=None,
        contourpoint=None,
        xDeviceTable=None,
        yDeviceTable=None,
        location=None,
    ):
        Expression.__init__(self, location)
        self.name = name
        self.x = x
        self.y = y
        self.contourpoint = contourpoint
        self.xDeviceTable = xDeviceTable
        self.yDeviceTable = yDeviceTable
    def asFea(self, indent=""):
        # A named anchor is emitted by reference only.
        if self.name is not None:
            return "<anchor {}>".format(self.name)
        res = "<anchor {} {}".format(self.x, self.y)
        if self.contourpoint:
            res += " contourpoint {}".format(self.contourpoint)
        if self.xDeviceTable or self.yDeviceTable:
            # If either device table is present, both are written.
            res += " " + deviceToString(self.xDeviceTable)
            res += " " + deviceToString(self.yDeviceTable)
        res += ">"
        return res
class AnchorDefinition(Statement):
    """A named anchor definition. (2.e.viii). ``name`` should be a string."""
    def __init__(self, name, x, y, contourpoint=None, location=None):
        Statement.__init__(self, location)
        self.name = name
        self.x = x
        self.y = y
        self.contourpoint = contourpoint
    def asFea(self, indent=""):
        contour = ""
        if self.contourpoint:
            contour = " contourpoint {}".format(self.contourpoint)
        return "anchorDef {} {}{} {};".format(self.x, self.y, contour, self.name)
class AttachStatement(Statement):
    """A ``GDEF`` table ``Attach`` statement."""
    def __init__(self, glyphs, contourPoints, location=None):
        Statement.__init__(self, location)
        self.glyphs = glyphs  #: A `glyph-containing object`_
        self.contourPoints = contourPoints  #: A list of integer contour points
    def build(self, builder):
        """Calls the builder object's ``add_attach_points`` callback."""
        builder.add_attach_points(
            self.location, self.glyphs.glyphSet(), self.contourPoints
        )
    def asFea(self, indent=""):
        points = " ".join(str(pt) for pt in self.contourPoints)
        return "Attach {} {};".format(self.glyphs.asFea(), points)
class ChainContextPosStatement(Statement):
    r"""A chained contextual positioning statement.
    ``prefix``, ``glyphs``, and ``suffix`` should be lists of
    `glyph-containing objects`_ .
    ``lookups`` should be a list of elements representing what lookups
    to apply at each glyph position. Each element should be a
    :class:`LookupBlock` to apply a single chaining lookup at the given
    position, a list of :class:`LookupBlock`\ s to apply multiple
    lookups, or ``None`` to apply no lookup. The length of the outer
    list should equal the length of ``glyphs``; the inner lists can be
    of variable length."""
    def __init__(self, prefix, glyphs, suffix, lookups, location=None):
        Statement.__init__(self, location)
        self.prefix, self.glyphs, self.suffix = prefix, glyphs, suffix
        self.lookups = list(lookups)
        # Normalize every non-empty entry to a list of lookups.
        for i, lookup in enumerate(lookups):
            if lookup:
                try:
                    (_ for _ in lookup)
                except TypeError:
                    self.lookups[i] = [lookup]
    def build(self, builder):
        """Calls the builder's ``add_chain_context_pos`` callback."""
        prefix = [p.glyphSet() for p in self.prefix]
        glyphs = [g.glyphSet() for g in self.glyphs]
        suffix = [s.glyphSet() for s in self.suffix]
        builder.add_chain_context_pos(
            self.location, prefix, glyphs, suffix, self.lookups
        )
    def asFea(self, indent=""):
        res = "pos "
        if (
            len(self.prefix)
            or len(self.suffix)
            or any([x is not None for x in self.lookups])
        ):
            if len(self.prefix):
                res += " ".join(g.asFea() for g in self.prefix) + " "
            for i, g in enumerate(self.glyphs):
                res += g.asFea() + "'"
                if self.lookups[i]:
                    for lu in self.lookups[i]:
                        res += " lookup " + lu.name
                if i < len(self.glyphs) - 1:
                    res += " "
            if len(self.suffix):
                res += " " + " ".join(map(asFea, self.suffix))
        else:
            # BUG FIX: this branch previously read ``self.glyph`` (an attribute
            # that does not exist on this class), raising AttributeError for
            # rules with no context and no lookups. The attribute is
            # ``self.glyphs``.
            res += " ".join(map(asFea, self.glyphs))
        res += ";"
        return res
class ChainContextSubstStatement(Statement):
    r"""A chained contextual substitution statement.
    ``prefix``, ``glyphs``, and ``suffix`` should be lists of
    `glyph-containing objects`_ .
    ``lookups`` should be a list of elements representing what lookups
    to apply at each glyph position. Each element should be a
    :class:`LookupBlock` to apply a single chaining lookup at the given
    position, a list of :class:`LookupBlock`\ s to apply multiple
    lookups, or ``None`` to apply no lookup. The length of the outer
    list should equal the length of ``glyphs``; the inner lists can be
    of variable length."""
    def __init__(self, prefix, glyphs, suffix, lookups, location=None):
        Statement.__init__(self, location)
        self.prefix, self.glyphs, self.suffix = prefix, glyphs, suffix
        self.lookups = list(lookups)
        # Normalize every non-empty entry to a list of lookups.
        for i, lookup in enumerate(lookups):
            if lookup:
                try:
                    (_ for _ in lookup)
                except TypeError:
                    self.lookups[i] = [lookup]
    def build(self, builder):
        """Calls the builder's ``add_chain_context_subst`` callback."""
        prefix = [p.glyphSet() for p in self.prefix]
        glyphs = [g.glyphSet() for g in self.glyphs]
        suffix = [s.glyphSet() for s in self.suffix]
        builder.add_chain_context_subst(
            self.location, prefix, glyphs, suffix, self.lookups
        )
    def asFea(self, indent=""):
        res = "sub "
        if (
            len(self.prefix)
            or len(self.suffix)
            or any([x is not None for x in self.lookups])
        ):
            if len(self.prefix):
                res += " ".join(g.asFea() for g in self.prefix) + " "
            for i, g in enumerate(self.glyphs):
                res += g.asFea() + "'"
                if self.lookups[i]:
                    for lu in self.lookups[i]:
                        res += " lookup " + lu.name
                if i < len(self.glyphs) - 1:
                    res += " "
            if len(self.suffix):
                res += " " + " ".join(map(asFea, self.suffix))
        else:
            # BUG FIX: this branch previously read ``self.glyph`` (an attribute
            # that does not exist on this class), raising AttributeError for
            # rules with no context and no lookups. The attribute is
            # ``self.glyphs``.
            res += " ".join(map(asFea, self.glyphs))
        res += ";"
        return res
class CursivePosStatement(Statement):
    """A cursive positioning statement. Entry and exit anchors can either
    be :class:`Anchor` objects or ``None``."""
    def __init__(self, glyphclass, entryAnchor, exitAnchor, location=None):
        Statement.__init__(self, location)
        self.glyphclass = glyphclass
        self.entryAnchor = entryAnchor
        self.exitAnchor = exitAnchor
    def build(self, builder):
        """Calls the builder object's ``add_cursive_pos`` callback."""
        builder.add_cursive_pos(
            self.location, self.glyphclass.glyphSet(), self.entryAnchor, self.exitAnchor
        )
    def asFea(self, indent=""):
        # A missing anchor is written as the NULL anchor.
        anchors = [
            a.asFea() if a else "<anchor NULL>"
            for a in (self.entryAnchor, self.exitAnchor)
        ]
        return "pos cursive {} {} {};".format(self.glyphclass.asFea(), *anchors)
class FeatureReferenceStatement(Statement):
    """Example: ``feature salt;``"""
    def __init__(self, featureName, location=None):
        Statement.__init__(self, location)
        # BUG FIX: previously also re-assigned ``self.location = location``
        # after Statement.__init__, clobbering the FeatureLibLocation
        # normalization performed by Element.__init__ with the raw value.
        self.featureName = featureName
    def build(self, builder):
        """Calls the builder object's ``add_feature_reference`` callback."""
        builder.add_feature_reference(self.location, self.featureName)
    def asFea(self, indent=""):
        return "feature {};".format(self.featureName)
class IgnorePosStatement(Statement):
    """An ``ignore pos`` statement, containing `one or more` contexts to ignore.
    ``chainContexts`` should be a list of ``(prefix, glyphs, suffix)`` tuples,
    with each of ``prefix``, ``glyphs`` and ``suffix`` being
    `glyph-containing objects`_ ."""
    def __init__(self, chainContexts, location=None):
        Statement.__init__(self, location)
        self.chainContexts = chainContexts
    def build(self, builder):
        """Calls the builder object's ``add_chain_context_pos`` callback on each
        rule context."""
        for prefix, glyphs, suffix in self.chainContexts:
            builder.add_chain_context_pos(
                self.location,
                [p.glyphSet() for p in prefix],
                [g.glyphSet() for g in glyphs],
                [s.glyphSet() for s in suffix],
                [],
            )
    def asFea(self, indent=""):
        contexts = []
        for prefix, glyphs, suffix in self.chainContexts:
            if prefix or suffix:
                # Contextual form: mark the input glyphs with ' and
                # surround them with the context glyphs.
                parts = []
                if prefix:
                    parts.append(" ".join(map(asFea, prefix)))
                parts.append(" ".join(g.asFea() + "'" for g in glyphs))
                if suffix:
                    parts.append(" ".join(map(asFea, suffix)))
                contexts.append(" ".join(parts))
            else:
                contexts.append(" ".join(map(asFea, glyphs)))
        return "ignore pos " + ", ".join(contexts) + ";"
class IgnoreSubstStatement(Statement):
    """An ``ignore sub`` statement, containing `one or more` contexts to ignore.
    ``chainContexts`` should be a list of ``(prefix, glyphs, suffix)`` tuples,
    with each of ``prefix``, ``glyphs`` and ``suffix`` being
    `glyph-containing objects`_ ."""
    def __init__(self, chainContexts, location=None):
        Statement.__init__(self, location)
        self.chainContexts = chainContexts
    def build(self, builder):
        """Calls the builder object's ``add_chain_context_subst`` callback on
        each rule context."""
        for prefix, glyphs, suffix in self.chainContexts:
            builder.add_chain_context_subst(
                self.location,
                [p.glyphSet() for p in prefix],
                [g.glyphSet() for g in glyphs],
                [s.glyphSet() for s in suffix],
                [],
            )
    def asFea(self, indent=""):
        contexts = []
        for prefix, glyphs, suffix in self.chainContexts:
            if prefix or suffix:
                # Contextual form: mark the input glyphs with ' and
                # surround them with the context glyphs.
                parts = []
                if prefix:
                    parts.append(" ".join(map(asFea, prefix)))
                parts.append(" ".join(g.asFea() + "'" for g in glyphs))
                if suffix:
                    parts.append(" ".join(map(asFea, suffix)))
                contexts.append(" ".join(parts))
            else:
                contexts.append(" ".join(map(asFea, glyphs)))
        return "ignore sub " + ", ".join(contexts) + ";"
class IncludeStatement(Statement):
    """An ``include()`` statement."""
    def __init__(self, filename, location=None):
        super(IncludeStatement, self).__init__(location)
        self.filename = filename  #: String containing name of file to include
    def build(self):
        # TODO: consider lazy-loading the including parser/lexer?
        raise FeatureLibError(
            "Building an include statement is not implemented yet. "
            "Instead, use Parser(..., followIncludes=True) for building.",
            self.location,
        )
    def asFea(self, indent=""):
        return "{}include({});".format(indent, self.filename)
class LanguageStatement(Statement):
    """A ``language`` statement within a feature."""
    def __init__(self, language, include_default=True, required=False, location=None):
        Statement.__init__(self, location)
        assert len(language) == 4
        self.language = language  #: A four-character language tag
        self.include_default = include_default  #: If false, "exclude_dflt"
        self.required = required
    def build(self, builder):
        """Call the builder object's ``set_language`` callback."""
        builder.set_language(
            location=self.location,
            language=self.language,
            include_default=self.include_default,
            required=self.required,
        )
    def asFea(self, indent=""):
        parts = ["language {}".format(self.language.strip())]
        if not self.include_default:
            parts.append(" exclude_dflt")
        if self.required:
            parts.append(" required")
        return "".join(parts) + ";"
class LanguageSystemStatement(Statement):
    """A top-level ``languagesystem`` statement."""
    def __init__(self, script, language, location=None):
        Statement.__init__(self, location)
        self.script = script
        self.language = language
    def build(self, builder):
        """Calls the builder object's ``add_language_system`` callback."""
        builder.add_language_system(self.location, self.script, self.language)
    def asFea(self, indent=""):
        return "languagesystem {} {};".format(self.script, self.language.strip())
class FontRevisionStatement(Statement):
    """A ``head`` table ``FontRevision`` statement. ``revision`` should be a
    number, and will be formatted to three significant decimal places."""
    def __init__(self, revision, location=None):
        Statement.__init__(self, location)
        self.revision = revision
    def build(self, builder):
        builder.set_font_revision(self.location, self.revision)
    def asFea(self, indent=""):
        # Always written with exactly three decimal places.
        return "FontRevision %.3f;" % self.revision
class LigatureCaretByIndexStatement(Statement):
    """A ``GDEF`` table ``LigatureCaretByIndex`` statement. ``glyphs`` should be
    a `glyph-containing object`_, and ``carets`` should be a list of integers."""
    def __init__(self, glyphs, carets, location=None):
        Statement.__init__(self, location)
        self.glyphs = glyphs
        self.carets = carets
    def build(self, builder):
        """Calls the builder object's ``add_ligatureCaretByIndex_`` callback."""
        builder.add_ligatureCaretByIndex_(
            self.location, self.glyphs.glyphSet(), set(self.carets)
        )
    def asFea(self, indent=""):
        caretText = " ".join(str(caret) for caret in self.carets)
        return "LigatureCaretByIndex {} {};".format(self.glyphs.asFea(), caretText)
class LigatureCaretByPosStatement(Statement):
    """A ``GDEF`` table ``LigatureCaretByPos`` statement. ``glyphs`` should be
    a `glyph-containing object`_, and ``carets`` should be a list of integers."""
    def __init__(self, glyphs, carets, location=None):
        Statement.__init__(self, location)
        self.glyphs = glyphs
        self.carets = carets
    def build(self, builder):
        """Calls the builder object's ``add_ligatureCaretByPos_`` callback."""
        builder.add_ligatureCaretByPos_(
            self.location, self.glyphs.glyphSet(), set(self.carets)
        )
    def asFea(self, indent=""):
        caretText = " ".join(str(caret) for caret in self.carets)
        return "LigatureCaretByPos {} {};".format(self.glyphs.asFea(), caretText)
class LigatureSubstStatement(Statement):
    """A chained contextual substitution statement.
    ``prefix``, ``glyphs``, and ``suffix`` should be lists of
    `glyph-containing objects`_; ``replacement`` should be a single
    `glyph-containing object`_.
    If ``forceChain`` is True, this is expressed as a chaining rule
    (e.g. ``sub f' i' by f_i``) even when no context is given."""
    def __init__(self, prefix, glyphs, suffix, replacement, forceChain, location=None):
        Statement.__init__(self, location)
        self.prefix = prefix
        self.glyphs = glyphs
        self.suffix = suffix
        self.replacement = replacement
        self.forceChain = forceChain
    def build(self, builder):
        builder.add_ligature_subst(
            self.location,
            [p.glyphSet() for p in self.prefix],
            [g.glyphSet() for g in self.glyphs],
            [s.glyphSet() for s in self.suffix],
            self.replacement,
            self.forceChain,
        )
    def asFea(self, indent=""):
        isChain = len(self.prefix) or len(self.suffix) or self.forceChain
        res = "sub "
        if isChain:
            if len(self.prefix):
                res += " ".join(g.asFea() for g in self.prefix) + " "
            res += " ".join(g.asFea() + "'" for g in self.glyphs)
            if len(self.suffix):
                res += " " + " ".join(g.asFea() for g in self.suffix)
        else:
            res += " ".join(g.asFea() for g in self.glyphs)
        return res + " by " + asFea(self.replacement) + ";"
class LookupFlagStatement(Statement):
    """A ``lookupflag`` statement. The ``value`` should be an integer value
    representing the flags in use, but not including the ``markAttachment``
    class and ``markFilteringSet`` values, which must be specified as
    glyph-containing objects."""
    def __init__(
        self, value=0, markAttachment=None, markFilteringSet=None, location=None
    ):
        Statement.__init__(self, location)
        self.value = value
        self.markAttachment = markAttachment
        self.markFilteringSet = markFilteringSet
    def build(self, builder):
        """Calls the builder object's ``set_lookup_flag`` callback."""
        markAttach = (
            self.markAttachment.glyphSet() if self.markAttachment is not None else None
        )
        markFilter = (
            self.markFilteringSet.glyphSet()
            if self.markFilteringSet is not None
            else None
        )
        builder.set_lookup_flag(self.location, self.value, markAttach, markFilter)
    def asFea(self, indent=""):
        # Bit i of self.value corresponds to flagNames[i].
        flagNames = ["RightToLeft", "IgnoreBaseGlyphs", "IgnoreLigatures", "IgnoreMarks"]
        res = [
            flagName
            for bitIndex, flagName in enumerate(flagNames)
            if self.value & (1 << bitIndex)
        ]
        if self.markAttachment is not None:
            res.append("MarkAttachmentType {}".format(self.markAttachment.asFea()))
        if self.markFilteringSet is not None:
            res.append("UseMarkFilteringSet {}".format(self.markFilteringSet.asFea()))
        if not res:
            res = ["0"]
        return "lookupflag {};".format(" ".join(res))
class LookupReferenceStatement(Statement):
    """Represents a ``lookup ...;`` statement to include a lookup in a feature.
    The ``lookup`` should be a :class:`LookupBlock` object."""
    def __init__(self, lookup, location=None):
        Statement.__init__(self, location)
        # BUG FIX: previously also re-assigned ``self.location = location``
        # after Statement.__init__, clobbering the FeatureLibLocation
        # normalization performed by Element.__init__ with the raw value.
        self.lookup = lookup
    def build(self, builder):
        """Calls the builder object's ``add_lookup_call`` callback."""
        builder.add_lookup_call(self.lookup.name)
    def asFea(self, indent=""):
        return "lookup {};".format(self.lookup.name)
class MarkBasePosStatement(Statement):
    """A mark-to-base positioning rule. The ``base`` should be a
    `glyph-containing object`_. The ``marks`` should be a list of
    (:class:`Anchor`, :class:`MarkClass`) tuples."""

    def __init__(self, base, marks, location=None):
        Statement.__init__(self, location)
        self.base = base
        self.marks = marks

    def build(self, builder):
        """Calls the builder object's ``add_mark_base_pos`` callback."""
        builder.add_mark_base_pos(self.location, self.base.glyphSet(), self.marks)

    def asFea(self, indent=""):
        """Render as a ``pos base`` rule, one anchor/mark-class per line."""
        lines = ["pos base {}".format(self.base.asFea())]
        for anchor, markClass in self.marks:
            lines.append(
                indent + SHIFT + "{} mark @{}".format(anchor.asFea(), markClass.name)
            )
        return "\n".join(lines) + ";"
class MarkLigPosStatement(Statement):
    """A mark-to-ligature positioning rule. The ``ligatures`` must be a
    `glyph-containing object`_. The ``marks`` should be a list of lists: each
    element in the top-level list represents a component glyph, and is made
    up of a list of (:class:`Anchor`, :class:`MarkClass`) tuples representing
    mark attachment points for that position.
    Example::
        m1 = MarkClass("TOP_MARKS")
        m2 = MarkClass("BOTTOM_MARKS")
        # ... add definitions to mark classes...
        glyph = GlyphName("lam_meem_jeem")
        marks = [
            [ (Anchor(625,1800), m1) ], # Attachments on 1st component (lam)
            [ (Anchor(376,-378), m2) ], # Attachments on 2nd component (meem)
            [ ] # No attachments on the jeem
        ]
        mlp = MarkLigPosStatement(glyph, marks)
        mlp.asFea()
        # pos ligature lam_meem_jeem <anchor 625 1800> mark @TOP_MARKS
        # ligComponent <anchor 376 -378> mark @BOTTOM_MARKS;
    """

    def __init__(self, ligatures, marks, location=None):
        Statement.__init__(self, location)
        self.ligatures = ligatures
        self.marks = marks

    def build(self, builder):
        """Calls the builder object's ``add_mark_lig_pos`` callback."""
        builder.add_mark_lig_pos(self.location, self.ligatures.glyphSet(), self.marks)

    def asFea(self, indent=""):
        """Render the rule; components are separated by ``ligComponent``."""
        pieces = []
        for component in self.marks:
            # A component with no attachments is written as <anchor NULL>.
            if not component:
                pieces.append("\n" + indent + SHIFT * 2 + "<anchor NULL>")
            else:
                pieces.append(
                    "".join(
                        "\n"
                        + indent
                        + SHIFT * 2
                        + "{} mark @{}".format(anchor.asFea(), markClass.name)
                        for anchor, markClass in component
                    )
                )
        separator = "\n" + indent + SHIFT + "ligComponent"
        return (
            "pos ligature {}".format(self.ligatures.asFea())
            + separator.join(pieces)
            + ";"
        )
class MarkMarkPosStatement(Statement):
    """A mark-to-mark positioning rule. The ``baseMarks`` must be a
    `glyph-containing object`_. The ``marks`` should be a list of
    (:class:`Anchor`, :class:`MarkClass`) tuples."""

    def __init__(self, baseMarks, marks, location=None):
        Statement.__init__(self, location)
        self.baseMarks = baseMarks
        self.marks = marks

    def build(self, builder):
        """Calls the builder object's ``add_mark_mark_pos`` callback."""
        builder.add_mark_mark_pos(self.location, self.baseMarks.glyphSet(), self.marks)

    def asFea(self, indent=""):
        """Render as a ``pos mark`` rule, one anchor/mark-class per line."""
        lines = ["pos mark {}".format(self.baseMarks.asFea())]
        for anchor, markClass in self.marks:
            lines.append(
                indent + SHIFT + "{} mark @{}".format(anchor.asFea(), markClass.name)
            )
        return "\n".join(lines) + ";"
class MultipleSubstStatement(Statement):
    """A multiple substitution statement.
    Args:
        prefix: a list of `glyph-containing objects`_.
        glyph: a single glyph-containing object.
        suffix: a list of glyph-containing objects.
        replacement: a list of glyph-containing objects.
        forceChain: If true, the statement is expressed as a chaining rule
            (e.g. ``sub f' i' by f_i``) even when no context is given.
    """

    def __init__(
        self, prefix, glyph, suffix, replacement, forceChain=False, location=None
    ):
        Statement.__init__(self, location)
        self.prefix = prefix
        self.glyph = glyph
        self.suffix = suffix
        self.replacement = replacement
        self.forceChain = forceChain

    def build(self, builder):
        """Calls the builder object's ``add_multiple_subst`` callback."""
        builder.add_multiple_subst(
            self.location,
            [p.glyphSet() for p in self.prefix],
            self.glyph,
            [s.glyphSet() for s in self.suffix],
            self.replacement,
            self.forceChain,
        )

    def asFea(self, indent=""):
        """Render as a ``sub ... by ...;`` rule, marked when contextual."""
        if self.prefix or self.suffix or self.forceChain:
            parts = [asFea(g) for g in self.prefix]
            parts.append(asFea(self.glyph) + "'")
            parts.extend(asFea(g) for g in self.suffix)
            target = " ".join(parts)
        else:
            target = asFea(self.glyph)
        # An empty replacement list means deletion, spelled as NULL.
        replacement = self.replacement or [NullGlyph()]
        return "sub {} by {};".format(
            target, " ".join(asFea(g) for g in replacement)
        )
class PairPosStatement(Statement):
    """A pair positioning statement.
    ``glyphs1`` and ``glyphs2`` should be `glyph-containing objects`_.
    ``valuerecord1`` should be a :class:`ValueRecord` object;
    ``valuerecord2`` should be either a :class:`ValueRecord` object or ``None``.
    If ``enumerated`` is true, then this is expressed as an
    `enumerated pair <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#6.b.ii>`_.
    """

    def __init__(
        self,
        glyphs1,
        valuerecord1,
        glyphs2,
        valuerecord2,
        enumerated=False,
        location=None,
    ):
        Statement.__init__(self, location)
        self.enumerated = enumerated
        self.glyphs1 = glyphs1
        self.valuerecord1 = valuerecord1
        self.glyphs2 = glyphs2
        self.valuerecord2 = valuerecord2

    def build(self, builder):
        """Calls a callback on the builder object:
        * If the rule is enumerated, calls ``add_specific_pair_pos`` on each
          combination of first and second glyphs.
        * If the glyphs are both single :class:`GlyphName` objects, calls
          ``add_specific_pair_pos``.
        * Else, calls ``add_class_pair_pos``.
        """
        if self.enumerated:
            # Expand the class pair into every specific glyph pair.
            for first, second in itertools.product(
                self.glyphs1.glyphSet(), self.glyphs2.glyphSet()
            ):
                builder.add_specific_pair_pos(
                    self.location, first, self.valuerecord1, second, self.valuerecord2
                )
        elif isinstance(self.glyphs1, GlyphName) and isinstance(
            self.glyphs2, GlyphName
        ):
            builder.add_specific_pair_pos(
                self.location,
                self.glyphs1.glyph,
                self.valuerecord1,
                self.glyphs2.glyph,
                self.valuerecord2,
            )
        else:
            builder.add_class_pair_pos(
                self.location,
                self.glyphs1.glyphSet(),
                self.valuerecord1,
                self.glyphs2.glyphSet(),
                self.valuerecord2,
            )

    def asFea(self, indent=""):
        """Render as ``[enum] pos ...;`` in format A or B."""
        keyword = "enum pos" if self.enumerated else "pos"
        if self.valuerecord2:
            body = "{} {} {} {}".format(
                self.glyphs1.asFea(),
                self.valuerecord1.asFea(),
                self.glyphs2.asFea(),
                self.valuerecord2.asFea(),
            )
        else:
            body = "{} {} {}".format(
                self.glyphs1.asFea(), self.glyphs2.asFea(), self.valuerecord1.asFea()
            )
        return "{} {};".format(keyword, body)
class ReverseChainSingleSubstStatement(Statement):
    """A reverse chaining substitution statement. You don't see those every day.
    Note the unusual argument order: ``suffix`` comes `before` ``glyphs``.
    ``old_prefix``, ``old_suffix``, ``glyphs`` and ``replacements`` should be
    lists of `glyph-containing objects`_. ``glyphs`` and ``replacements`` should
    be one-item lists.
    """

    def __init__(self, old_prefix, old_suffix, glyphs, replacements, location=None):
        Statement.__init__(self, location)
        self.old_prefix = old_prefix
        self.old_suffix = old_suffix
        self.glyphs = glyphs
        self.replacements = replacements

    def build(self, builder):
        """Calls the builder object's ``add_reverse_chain_single_subst`` callback."""
        originals = self.glyphs[0].glyphSet()
        replaces = self.replacements[0].glyphSet()
        if len(replaces) == 1:
            # A single replacement glyph applies to every original glyph.
            replaces = replaces * len(originals)
        builder.add_reverse_chain_single_subst(
            self.location,
            [p.glyphSet() for p in self.old_prefix],
            [s.glyphSet() for s in self.old_suffix],
            dict(zip(originals, replaces)),
        )

    def asFea(self, indent=""):
        """Render as an ``rsub ... by ...;`` rule, marked when contextual."""
        if self.old_prefix or self.old_suffix:
            parts = [asFea(g) for g in self.old_prefix]
            parts.extend(asFea(g) + "'" for g in self.glyphs)
            parts.extend(asFea(g) for g in self.old_suffix)
            target = " ".join(parts)
        else:
            target = " ".join(asFea(g) for g in self.glyphs)
        return "rsub {} by {};".format(
            target, " ".join(asFea(g) for g in self.replacements)
        )
class SingleSubstStatement(Statement):
    """A single substitution statement.
    Note the unusual argument order: ``prefix`` and suffix come `after`
    the replacement ``glyphs``. ``prefix``, ``suffix``, ``glyphs`` and
    ``replace`` should be lists of `glyph-containing objects`_. ``glyphs`` and
    ``replace`` should be one-item lists.
    """

    def __init__(self, glyphs, replace, prefix, suffix, forceChain, location=None):
        Statement.__init__(self, location)
        self.prefix = prefix
        self.suffix = suffix
        self.forceChain = forceChain
        self.glyphs = glyphs
        self.replacements = replace

    def build(self, builder):
        """Calls the builder object's ``add_single_subst`` callback."""
        originals = self.glyphs[0].glyphSet()
        replaces = self.replacements[0].glyphSet()
        if len(replaces) == 1:
            # A single replacement glyph applies to every original glyph.
            replaces = replaces * len(originals)
        builder.add_single_subst(
            self.location,
            [p.glyphSet() for p in self.prefix],
            [s.glyphSet() for s in self.suffix],
            OrderedDict(zip(originals, replaces)),
            self.forceChain,
        )

    def asFea(self, indent=""):
        """Render as a ``sub ... by ...;`` rule, marked when contextual."""
        if self.prefix or self.suffix or self.forceChain:
            parts = [asFea(g) for g in self.prefix]
            parts.extend(asFea(g) + "'" for g in self.glyphs)
            parts.extend(asFea(g) for g in self.suffix)
            target = " ".join(parts)
        else:
            target = " ".join(asFea(g) for g in self.glyphs)
        return "sub {} by {};".format(
            target, " ".join(asFea(g) for g in self.replacements)
        )
class ScriptStatement(Statement):
    """A ``script`` statement."""

    def __init__(self, script, location=None):
        Statement.__init__(self, location)
        #: the script code
        self.script = script

    def build(self, builder):
        """Calls the builder's ``set_script`` callback."""
        builder.set_script(self.location, self.script)

    def asFea(self, indent=""):
        """Render as ``script <tag>;`` (whitespace-padded tags are trimmed)."""
        return f"script {self.script.strip()};"
class SinglePosStatement(Statement):
    """A single position statement. ``prefix`` and ``suffix`` should be
    lists of `glyph-containing objects`_.
    ``pos`` should be a one-element list containing a (`glyph-containing object`_,
    :class:`ValueRecord`) tuple."""

    def __init__(self, pos, prefix, suffix, forceChain, location=None):
        Statement.__init__(self, location)
        self.pos = pos
        self.prefix = prefix
        self.suffix = suffix
        self.forceChain = forceChain

    def build(self, builder):
        """Calls the builder object's ``add_single_pos`` callback."""
        builder.add_single_pos(
            self.location,
            [p.glyphSet() for p in self.prefix],
            [s.glyphSet() for s in self.suffix],
            [(g.glyphSet(), value) for g, value in self.pos],
            self.forceChain,
        )

    def asFea(self, indent=""):
        """Render as a ``pos`` rule, with marked glyphs when contextual."""
        if self.prefix or self.suffix or self.forceChain:
            parts = [asFea(g) for g in self.prefix]
            for glyphs, value in self.pos:
                marked = asFea(glyphs) + "'"
                if value:
                    marked += " " + value.asFea()
                parts.append(marked)
            parts.extend(asFea(g) for g in self.suffix)
        else:
            parts = [
                asFea(glyphs) + " " + (value.asFea() if value else "")
                for glyphs, value in self.pos
            ]
        return "pos " + " ".join(parts) + ";"
class SubtableStatement(Statement):
    """Represents a subtable break (the ``subtable;`` statement)."""

    def __init__(self, location=None):
        Statement.__init__(self, location)

    def build(self, builder):
        """Calls the builder objects's ``add_subtable_break`` callback."""
        builder.add_subtable_break(self.location)

    def asFea(self, indent=""):
        """Render as the literal ``subtable;`` keyword."""
        return "subtable;"
class ValueRecord(Expression):
    """Represents a GPOS value record: placement and advance adjustments,
    plus optional device tables for each of the four numbers.

    A record whose components are all ``None`` is "empty" (falsy) and is
    rendered as ``<NULL>``.
    """

    def __init__(
        self,
        xPlacement=None,
        yPlacement=None,
        xAdvance=None,
        yAdvance=None,
        xPlaDevice=None,
        yPlaDevice=None,
        xAdvDevice=None,
        yAdvDevice=None,
        vertical=False,
        location=None,
    ):
        Expression.__init__(self, location)
        self.xPlacement, self.yPlacement = (xPlacement, yPlacement)
        self.xAdvance, self.yAdvance = (xAdvance, yAdvance)
        self.xPlaDevice, self.yPlaDevice = (xPlaDevice, yPlaDevice)
        self.xAdvDevice, self.yAdvDevice = (xAdvDevice, yAdvDevice)
        self.vertical = vertical

    def __eq__(self, other):
        # Bug fix: compare the same set of fields that __hash__ mixes in.
        # Previously yPlaDevice and yAdvDevice were hashed but not compared,
        # so two records could compare equal yet hash differently, violating
        # the eq/hash contract. ('vertical' is excluded from both, matching
        # the original behavior.)
        return (
            self.xPlacement == other.xPlacement
            and self.yPlacement == other.yPlacement
            and self.xAdvance == other.xAdvance
            and self.yAdvance == other.yAdvance
            and self.xPlaDevice == other.xPlaDevice
            and self.yPlaDevice == other.yPlaDevice
            and self.xAdvDevice == other.xAdvDevice
            and self.yAdvDevice == other.yAdvDevice
        )

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return (
            hash(self.xPlacement)
            ^ hash(self.yPlacement)
            ^ hash(self.xAdvance)
            ^ hash(self.yAdvance)
            ^ hash(self.xPlaDevice)
            ^ hash(self.yPlaDevice)
            ^ hash(self.xAdvDevice)
            ^ hash(self.yAdvDevice)
        )

    def asFea(self, indent=""):
        """Render in the shortest valid feature-file form: a bare number
        (format A), ``<x y xAdv yAdv>`` (format B), or the full form with
        device tables (format C)."""
        if not self:
            return "<NULL>"
        x, y = self.xPlacement, self.yPlacement
        xAdvance, yAdvance = self.xAdvance, self.yAdvance
        xPlaDevice, yPlaDevice = self.xPlaDevice, self.yPlaDevice
        xAdvDevice, yAdvDevice = self.xAdvDevice, self.yAdvDevice
        vertical = self.vertical
        # Try format A, if possible: a single advance in the writing
        # direction with no placement values.
        if x is None and y is None:
            if xAdvance is None and vertical:
                return str(yAdvance)
            elif yAdvance is None and not vertical:
                return str(xAdvance)
        # Make any remaining None value 0 to avoid generating invalid records.
        x = x or 0
        y = y or 0
        xAdvance = xAdvance or 0
        yAdvance = yAdvance or 0
        # Try format B, if possible (no device tables).
        if (
            xPlaDevice is None
            and yPlaDevice is None
            and xAdvDevice is None
            and yAdvDevice is None
        ):
            return "<%s %s %s %s>" % (x, y, xAdvance, yAdvance)
        # Last resort is format C.
        return "<%s %s %s %s %s %s %s %s>" % (
            x,
            y,
            xAdvance,
            yAdvance,
            deviceToString(xPlaDevice),
            deviceToString(yPlaDevice),
            deviceToString(xAdvDevice),
            deviceToString(yAdvDevice),
        )

    def __bool__(self):
        # "Set" means any component is not None; an all-None record is falsy.
        return any(
            getattr(self, v) is not None
            for v in [
                "xPlacement",
                "yPlacement",
                "xAdvance",
                "yAdvance",
                "xPlaDevice",
                "yPlaDevice",
                "xAdvDevice",
                "yAdvDevice",
            ]
        )

    __nonzero__ = __bool__
class ValueRecordDefinition(Statement):
    """Represents a named value record definition (``valueRecordDef``)."""

    def __init__(self, name, value, location=None):
        Statement.__init__(self, location)
        self.name = name  #: Value record name as string
        self.value = value  #: :class:`ValueRecord` object

    def asFea(self, indent=""):
        """Render as ``valueRecordDef <record> <name>;``."""
        return "valueRecordDef {} {};".format(self.value.asFea(), self.name)
def simplify_name_attributes(pid, eid, lid):
    """Return the shortest feature-file spelling of a name record's platform
    attributes (platform ID, platform encoding ID, language ID).

    The Windows Unicode / US-English triple (3 1 1033) is the default and is
    omitted entirely; the Macintosh Roman / English triple (1 0 0) may be
    abbreviated to just "1"; any other combination is spelled out in full.
    """
    if (pid, eid, lid) == (3, 1, 1033):
        return ""
    if (pid, eid, lid) == (1, 0, 0):
        return "1"
    return "{} {} {}".format(pid, eid, lid)
class NameRecord(Statement):
    """Represents a name record. (`Section 9.e. <https://adobe-type-tools.github.io/afdko/OpenTypeFeatureFileSpecification.html#9.e>`_)"""
    def __init__(self, nameID, platformID, platEncID, langID, string, location=None):
        Statement.__init__(self, location)
        self.nameID = nameID #: Name ID as integer (e.g. 9 for designer's name)
        self.platformID = platformID #: Platform ID as integer
        self.platEncID = platEncID #: Platform encoding ID as integer
        self.langID = langID #: Language ID as integer
        self.string = string #: Name record value
    def build(self, builder):
        """Calls the builder object's ``add_name_record`` callback."""
        builder.add_name_record(
            self.location,
            self.nameID,
            self.platformID,
            self.platEncID,
            self.langID,
            self.string,
        )
    def asFea(self, indent=""):
        """Render as ``nameid <id> [<plat>] "<escaped string>";``.

        Raises FeatureLibError if the platform/encoding/language triple has
        no supported text encoding.
        """
        def escape(c, escape_pattern):
            # Keep printable ASCII as-is; everything else (including
            # U+0022 QUOTATION MARK and U+005C REVERSE SOLIDUS, which would
            # break the quoted string) is hex-escaped via escape_pattern.
            # Also escape U+0022 QUOTATION MARK and U+005C REVERSE SOLIDUS
            if c >= 0x20 and c <= 0x7E and c not in (0x22, 0x5C):
                return chr(c)
            else:
                return escape_pattern % c
        encoding = getEncoding(self.platformID, self.platEncID, self.langID)
        if encoding is None:
            raise FeatureLibError("Unsupported encoding", self.location)
        s = tobytes(self.string, encoding=encoding)
        if encoding == "utf_16_be":
            # UTF-16BE: recombine each big-endian byte pair into a single
            # 16-bit code unit and escape it as a 4-digit hex sequence.
            escaped_string = "".join(
                [
                    escape(byteord(s[i]) * 256 + byteord(s[i + 1]), r"\%04x")
                    for i in range(0, len(s), 2)
                ]
            )
        else:
            # Single-byte encodings: escape one byte at a time as \XX.
            escaped_string = "".join([escape(byteord(b), r"\%02x") for b in s])
        plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID)
        if plat != "":
            plat += " "
        return 'nameid {} {}"{}";'.format(self.nameID, plat, escaped_string)
class FeatureNameStatement(NameRecord):
    """Represents a ``sizemenuname`` or ``name`` statement."""

    def build(self, builder):
        """Calls the builder object's ``add_featureName`` callback."""
        NameRecord.build(self, builder)
        builder.add_featureName(self.nameID)

    def asFea(self, indent=""):
        """Render as a ``name`` (or ``sizemenuname`` for the size feature)
        statement."""
        keyword = "sizemenuname" if self.nameID == "size" else "name"
        plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID)
        if plat:
            plat += " "
        return '{} {}"{}";'.format(keyword, plat, self.string)
class STATNameStatement(NameRecord):
    """Represents a STAT table ``name`` statement."""

    def asFea(self, indent=""):
        """Render as ``name [<plat>] "<string>";``."""
        plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID)
        if plat:
            plat += " "
        return 'name {}"{}";'.format(plat, self.string)
class SizeParameters(Statement):
    """A ``parameters`` statement (for the ``size`` feature)."""

    def __init__(self, DesignSize, SubfamilyID, RangeStart, RangeEnd, location=None):
        Statement.__init__(self, location)
        self.DesignSize = DesignSize
        self.SubfamilyID = SubfamilyID
        self.RangeStart = RangeStart
        self.RangeEnd = RangeEnd

    def build(self, builder):
        """Calls the builder object's ``set_size_parameters`` callback."""
        builder.set_size_parameters(
            self.location,
            self.DesignSize,
            self.SubfamilyID,
            self.RangeStart,
            self.RangeEnd,
        )

    def asFea(self, indent=""):
        """Render as ``parameters <size> <id> [<start> <end>];`` — the range
        pair is emitted in decipoints, and omitted when both ends are 0."""
        res = "parameters {:.1f} {}".format(self.DesignSize, self.SubfamilyID)
        if self.RangeStart != 0 or self.RangeEnd != 0:
            res += " {} {}".format(int(self.RangeStart * 10), int(self.RangeEnd * 10))
        return res + ";"
class CVParametersNameStatement(NameRecord):
    """Represent a name statement inside a ``cvParameters`` block."""

    def __init__(
        self, nameID, platformID, platEncID, langID, string, block_name, location=None
    ):
        NameRecord.__init__(
            self, nameID, platformID, platEncID, langID, string, location=location
        )
        self.block_name = block_name

    def build(self, builder):
        """Calls the builder object's ``add_cv_parameter`` callback."""
        suffix = ""
        if self.block_name == "ParamUILabelNameID":
            # Multiple ParamUILabelNameID entries are distinguished by a
            # per-feature counter maintained by the builder.
            suffix = "_{}".format(builder.cv_num_named_params_.get(self.nameID, 0))
        builder.add_cv_parameter(self.nameID)
        self.nameID = (self.nameID, self.block_name + suffix)
        NameRecord.build(self, builder)

    def asFea(self, indent=""):
        """Render as ``name [<plat>] "<string>";``."""
        plat = simplify_name_attributes(self.platformID, self.platEncID, self.langID)
        if plat:
            plat += " "
        return 'name {}"{}";'.format(plat, self.string)
class CharacterStatement(Statement):
    """
    Statement used in cvParameters blocks of Character Variant features (cvXX).
    The Unicode value may be written with either decimal or hexadecimal
    notation. The value must be preceded by '0x' if it is a hexadecimal value.
    The largest Unicode value allowed is 0xFFFFFF.
    """

    def __init__(self, character, tag, location=None):
        Statement.__init__(self, location)
        self.character = character
        self.tag = tag

    def build(self, builder):
        """Calls the builder object's ``add_cv_character`` callback."""
        builder.add_cv_character(self.character, self.tag)

    def asFea(self, indent=""):
        """Render as ``Character 0x...;`` (always hexadecimal)."""
        return f"Character {self.character:#x};"
class BaseAxis(Statement):
    """An axis definition, being either a ``VertAxis.BaseTagList/BaseScriptList``
    pair or a ``HorizAxis.BaseTagList/BaseScriptList`` pair."""

    def __init__(self, bases, scripts, vertical, location=None):
        Statement.__init__(self, location)
        self.bases = bases  #: A list of baseline tag names as strings
        self.scripts = scripts  #: A list of script record tuplets (script tag, default baseline tag, base coordinate)
        self.vertical = vertical  #: Boolean; VertAxis if True, HorizAxis if False

    def build(self, builder):
        """Calls the builder object's ``set_base_axis`` callback."""
        builder.set_base_axis(self.bases, self.scripts, self.vertical)

    def asFea(self, indent=""):
        """Render both the BaseTagList and BaseScriptList statements."""
        direction = "Vert" if self.vertical else "Horiz"
        scripts = ", ".join(
            "{} {} {}".format(rec[0], rec[1], " ".join(str(c) for c in rec[2]))
            for rec in self.scripts
        )
        return "{}Axis.BaseTagList {};\n{}{}Axis.BaseScriptList {};".format(
            direction, " ".join(self.bases), indent, direction, scripts
        )
class OS2Field(Statement):
    """An entry in the ``OS/2`` table. Most ``values`` should be numbers or
    strings, apart from when the key is ``UnicodeRange``, ``CodePageRange``
    or ``Panose``, in which case it should be an array of integers."""

    def __init__(self, key, value, location=None):
        Statement.__init__(self, location)
        self.key = key
        self.value = value

    def build(self, builder):
        """Calls the builder object's ``add_os2_field`` callback."""
        builder.add_os2_field(self.key, self.value)

    def asFea(self, indent=""):
        """Render as ``<Keyword> <formatted value>;``; unknown keys render
        as an empty string."""

        def format_ints(values):
            return " ".join(str(v) for v in values)

        numbers = (
            "FSType",
            "TypoAscender",
            "TypoDescender",
            "TypoLineGap",
            "winAscent",
            "winDescent",
            "XHeight",
            "CapHeight",
            "WeightClass",
            "WidthClass",
            "LowerOpSize",
            "UpperOpSize",
        )
        ranges = ("UnicodeRange", "CodePageRange")
        # Map lower-cased key -> (canonical keyword, value formatter).
        keywords = {x.lower(): (x, str) for x in numbers}
        keywords.update({x.lower(): (x, format_ints) for x in ranges})
        keywords["panose"] = ("Panose", format_ints)
        keywords["vendor"] = ("Vendor", '"{}"'.format)
        entry = keywords.get(self.key)
        if entry is None:
            return ""  # should raise exception
        keyword, formatter = entry
        return "{} {};".format(keyword, formatter(self.value))
class HheaField(Statement):
    """An entry in the ``hhea`` table."""

    def __init__(self, key, value, location=None):
        Statement.__init__(self, location)
        self.key = key
        self.value = value

    def build(self, builder):
        """Calls the builder object's ``add_hhea_field`` callback."""
        builder.add_hhea_field(self.key, self.value)

    def asFea(self, indent=""):
        """Render as ``<Keyword> <value>;`` with the canonical capitalization."""
        keywords = {
            f.lower(): f for f in ("CaretOffset", "Ascender", "Descender", "LineGap")
        }
        return "{} {};".format(keywords[self.key], self.value)
class VheaField(Statement):
    """An entry in the ``vhea`` table."""

    def __init__(self, key, value, location=None):
        Statement.__init__(self, location)
        self.key = key
        self.value = value

    def build(self, builder):
        """Calls the builder object's ``add_vhea_field`` callback."""
        builder.add_vhea_field(self.key, self.value)

    def asFea(self, indent=""):
        """Render as ``<Keyword> <value>;`` with the canonical capitalization."""
        keywords = {
            f.lower(): f
            for f in ("VertTypoAscender", "VertTypoDescender", "VertTypoLineGap")
        }
        return "{} {};".format(keywords[self.key], self.value)
class STATDesignAxisStatement(Statement):
    """A STAT table Design Axis
    Args:
        tag (str): a 4 letter axis tag
        axisOrder (int): an int
        names (list): a list of :class:`STATNameStatement` objects
    """

    def __init__(self, tag, axisOrder, names, location=None):
        Statement.__init__(self, location)
        self.tag = tag
        self.axisOrder = axisOrder
        self.names = names
        self.location = location

    def build(self, builder):
        """Calls the builder object's ``addDesignAxis`` callback."""
        builder.addDesignAxis(self, self.location)

    def asFea(self, indent=""):
        """Render as a ``DesignAxis`` block containing the name statements."""
        indent += SHIFT
        body = ("\n" + indent).join(n.asFea(indent=indent) for n in self.names)
        return f"DesignAxis {self.tag} {self.axisOrder} {{ \n{body}\n}};"
class ElidedFallbackName(Statement):
    """STAT table ElidedFallbackName
    Args:
        names: a list of :class:`STATNameStatement` objects
    """

    def __init__(self, names, location=None):
        Statement.__init__(self, location)
        self.names = names
        self.location = location

    def build(self, builder):
        """Calls the builder object's ``setElidedFallbackName`` callback."""
        builder.setElidedFallbackName(self.names, self.location)

    def asFea(self, indent=""):
        """Render as an ``ElidedFallbackName`` block of name statements."""
        indent += SHIFT
        body = ("\n" + indent).join(n.asFea(indent=indent) for n in self.names)
        return "ElidedFallbackName { \n" + body + "\n};"
class ElidedFallbackNameID(Statement):
    """STAT table ElidedFallbackNameID
    Args:
        value: an int pointing to an existing name table name ID
    """

    def __init__(self, value, location=None):
        Statement.__init__(self, location)
        self.value = value
        self.location = location

    def build(self, builder):
        """Calls the builder object's ``setElidedFallbackName`` callback."""
        builder.setElidedFallbackName(self.value, self.location)

    def asFea(self, indent=""):
        """Render as ``ElidedFallbackNameID <id>;``."""
        return f"ElidedFallbackNameID {self.value};"
class STATAxisValueStatement(Statement):
    """A STAT table Axis Value Record
    Args:
        names (list): a list of :class:`STATNameStatement` objects
        locations (list): a list of :class:`AxisValueLocationStatement` objects
        flags (int): an int
    """

    def __init__(self, names, locations, flags, location=None):
        Statement.__init__(self, location)
        self.names = names
        self.locations = locations
        self.flags = flags

    def build(self, builder):
        """Calls the builder object's ``addAxisValueRecord`` callback."""
        builder.addAxisValueRecord(self, self.location)

    def asFea(self, indent=""):
        """Render as an ``AxisValue`` block with locations, names and flags."""
        res = "AxisValue {\n"
        res += "".join(loc.asFea() for loc in self.locations)
        res += "".join(name.asFea() for name in self.names) + "\n"
        if self.flags:
            # Flag bit 0x1 .. 0x2, in order.
            known = ("OlderSiblingFontAttribute", "ElidableAxisValueName")
            active = [name for bit, name in enumerate(known) if self.flags & (1 << bit)]
            res += f"flag {' '.join(active)};\n"
        return res + "};"
class AxisValueLocationStatement(Statement):
    """
    A STAT table Axis Value Location
    Args:
        tag (str): a 4 letter axis tag
        values (list): a list of ints and/or floats
    """

    def __init__(self, tag, values, location=None):
        Statement.__init__(self, location)
        self.tag = tag
        self.values = values

    def asFea(self, res=""):
        """Append and return a ``location <tag> <values>;`` line."""
        values = " ".join(str(v) for v in self.values)
        return res + f"location {self.tag} {values};\n"
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/feaLib/ast.py",
"copies": "3",
"size": "70109",
"license": "apache-2.0",
"hash": -8331864788724088000,
"line_mean": 33.553474618,
"line_max": 138,
"alpha_frac": 0.5802821321,
"autogenerated": false,
"ratio": 3.880715155540795,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00049429669030464,
"num_lines": 2029
} |
from fontTools.misc.py23 import byteord, tostr
import re
from bisect import bisect_right
try:
# use unicodedata backport compatible with python2:
# https://github.com/mikekap/unicodedata2
from unicodedata2 import *
except ImportError: # pragma: no cover
# fall back to built-in unicodedata (possibly outdated)
from unicodedata import *
from . import Blocks, Scripts, ScriptExtensions, OTTags
# Public API: names re-exported from the (backported or built-in)
# unicodedata module, plus the fontTools-specific helpers defined below.
__all__ = [tostr(s) for s in (
    # names from built-in unicodedata module
    "lookup",
    "name",
    "decimal",
    "digit",
    "numeric",
    "category",
    "bidirectional",
    "combining",
    "east_asian_width",
    "mirrored",
    "decomposition",
    "normalize",
    "unidata_version",
    "ucd_3_2_0",
    # additional functions
    "block",
    "script",
    "script_extension",
    "script_name",
    "script_code",
    "script_horizontal_direction",
    "ot_tags_from_script",
    "ot_tag_to_script",
)]
def script(char):
    """ Return the four-letter script code assigned to the Unicode character
    'char' as string.
    >>> script("a")
    'Latn'
    >>> script(",")
    'Zyyy'
    >>> script(chr(0x10FFFF))
    'Zzzz'
    """
    code = byteord(char)
    # Scripts.RANGES is a sorted list of range starting codepoints.
    # bisect_right returns the insertion point that comes *after* any entry
    # equal to 'code', so the range containing 'code' — the one whose start
    # is <= code — is at index i - 1.
    i = bisect_right(Scripts.RANGES, code)
    return Scripts.VALUES[i - 1]
def script_extension(char):
    """ Return the script extension property assigned to the Unicode character
    'char' as a set of string.
    >>> script_extension("a") == {'Latn'}
    True
    >>> script_extension(chr(0x060C)) == {'Rohg', 'Syrc', 'Yezi', 'Arab', 'Thaa'}
    True
    >>> script_extension(chr(0x10FFFF)) == {'Zzzz'}
    True
    """
    code = byteord(char)
    idx = bisect_right(ScriptExtensions.RANGES, code)
    value = ScriptExtensions.VALUES[idx - 1]
    if value is not None:
        return value
    # Code points not explicitly listed for Script Extensions have as their
    # value the corresponding Script property value.
    return {script(char)}
def script_name(code, default=KeyError):
    """ Return the long, human-readable script name given a four-letter
    Unicode script code.
    If no matching name is found, a KeyError is raised by default.
    You can use the 'default' argument to return a fallback value (e.g.
    'Unknown' or None) instead of throwing an error.
    """
    name = Scripts.NAMES.get(code)
    if name is not None:
        return str(name.replace("_", " "))
    # Unknown code: raise unless a non-KeyError fallback was supplied.
    if isinstance(default, type) and issubclass(default, KeyError):
        raise KeyError(code)
    return default
_normalize_re = re.compile(r"[-_ ]+")
def _normalize_property_name(string):
"""Remove case, strip space, '-' and '_' for loose matching."""
return _normalize_re.sub("", string).lower()
# Reverse mapping of Scripts.NAMES: loosely-normalized long script name
# (e.g. "olditalic") -> four-letter script code (e.g. "Ital").
_SCRIPT_CODES = {_normalize_property_name(v): k
                 for k, v in Scripts.NAMES.items()}
def script_code(script_name, default=KeyError):
    """Returns the four-letter Unicode script code from its long name
    If no matching script code is found, a KeyError is raised by default.
    You can use the 'default' argument to return a fallback string (e.g.
    'Zzzz' or None) instead of throwing an error.
    """
    normalized = _normalize_property_name(script_name)
    code = _SCRIPT_CODES.get(normalized)
    if code is not None:
        return code
    # Unknown name: raise unless a non-KeyError fallback was supplied.
    if isinstance(default, type) and issubclass(default, KeyError):
        raise KeyError(normalized)
    return default
# The data on script direction is taken from CLDR 37:
# https://github.com/unicode-org/cldr/blob/release-37/common/properties/scriptMetadata.txt
# Four-letter codes of scripts whose metadata marks them right-to-left,
# grouped by the Unicode version that introduced the script.
RTL_SCRIPTS = {
    # Unicode-1.1 additions
    'Arab',  # Arabic
    'Hebr',  # Hebrew
    # Unicode-3.0 additions
    'Syrc',  # Syriac
    'Thaa',  # Thaana
    # Unicode-4.0 additions
    'Cprt',  # Cypriot
    # Unicode-4.1 additions
    'Khar',  # Kharoshthi
    # Unicode-5.0 additions
    'Phnx',  # Phoenician
    'Nkoo',  # Nko
    # Unicode-5.1 additions
    'Lydi',  # Lydian
    # Unicode-5.2 additions
    'Avst',  # Avestan
    'Armi',  # Imperial Aramaic
    'Phli',  # Inscriptional Pahlavi
    'Prti',  # Inscriptional Parthian
    'Sarb',  # Old South Arabian
    'Orkh',  # Old Turkic
    'Samr',  # Samaritan
    # Unicode-6.0 additions
    'Mand',  # Mandaic
    # Unicode-6.1 additions
    'Merc',  # Meroitic Cursive
    'Mero',  # Meroitic Hieroglyphs
    # Unicode-7.0 additions
    'Mani',  # Manichaean
    'Mend',  # Mende Kikakui
    'Nbat',  # Nabataean
    'Narb',  # Old North Arabian
    'Palm',  # Palmyrene
    'Phlp',  # Psalter Pahlavi
    # Unicode-8.0 additions
    'Hatr',  # Hatran
    'Hung',  # Old Hungarian
    # Unicode-9.0 additions
    'Adlm',  # Adlam
    # Unicode-11.0 additions
    'Rohg',  # Hanifi Rohingya
    'Sogo',  # Old Sogdian
    'Sogd',  # Sogdian
    # Unicode-12.0 additions
    'Elym',  # Elymaic
    # Unicode-13.0 additions
    'Chrs',  # Chorasmian
    'Yezi',  # Yezidi
}
def script_horizontal_direction(script_code, default=KeyError):
    """ Return "RTL" for scripts that contain right-to-left characters
    according to the Bidi_Class property. Otherwise return "LTR".
    """
    if script_code in Scripts.NAMES:
        return str("RTL") if script_code in RTL_SCRIPTS else str("LTR")
    # Unknown script code: raise unless a non-KeyError fallback was supplied.
    if isinstance(default, type) and issubclass(default, KeyError):
        raise default(script_code)
    return default
def block(char):
    """ Return the block property assigned to the Unicode character 'char'
    as a string.
    >>> block("a")
    'Basic Latin'
    >>> block(chr(0x060C))
    'Arabic'
    >>> block(chr(0xEFFFF))
    'No_Block'
    """
    code = byteord(char)
    # Find the block whose starting codepoint is <= code (see script()).
    idx = bisect_right(Blocks.RANGES, code)
    return Blocks.VALUES[idx - 1]
def ot_tags_from_script(script_code):
    """ Return a list of OpenType script tags associated with a given
    Unicode script code.
    Return ['DFLT'] script tag for invalid/unknown script codes.
    """
    if script_code not in Scripts.NAMES:
        return [OTTags.DEFAULT_SCRIPT]
    # Default algorithmic tag: the script code with its first letter
    # lower-cased, unless an explicit exception exists.
    default_tag = OTTags.SCRIPT_EXCEPTIONS.get(
        script_code, script_code[0].lower() + script_code[1:]
    )
    tags = [default_tag]
    tags.extend(OTTags.NEW_SCRIPT_TAGS.get(script_code, []))
    tags.reverse()  # last in, first out
    return tags
def ot_tag_to_script(tag):
    """ Return the Unicode script code for the given OpenType script tag, or
    None for "DFLT" tag or if there is no Unicode script associated with it.
    Raises ValueError if the tag is invalid.
    """
    tag = tostr(tag).strip()
    if not tag or " " in tag or len(tag) > 4:
        raise ValueError("invalid OpenType tag: %r" % tag)
    tag = tag.ljust(4)  # pad with spaces to exactly four characters
    if tag == OTTags.DEFAULT_SCRIPT:
        # it's unclear which Unicode script the "DFLT" OpenType tag maps to,
        # so here we return None
        return None
    if tag in OTTags.NEW_SCRIPT_TAGS_REVERSED:
        return OTTags.NEW_SCRIPT_TAGS_REVERSED[tag]
    # This side of the conversion is fully algorithmic:
    # upper-case the first letter, and replace any padding space by
    # repeating the preceding letter, e.g. 'nko ' -> 'Nkoo'.
    script_code = tag[0].upper() + tag[1]
    for ch in tag[2:]:
        script_code += script_code[-1] if ch == " " else ch
    return script_code if script_code in Scripts.NAMES else None
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/unicodedata/__init__.py",
"copies": "5",
"size": "8034",
"license": "apache-2.0",
"hash": -8646776370873531000,
"line_mean": 27.2887323944,
"line_max": 90,
"alpha_frac": 0.6294498382,
"autogenerated": false,
"ratio": 3.40857021637675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.653802005457675,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import bytesjoin
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import (
fixedToFloat as fi2fl,
floatToFixed as fl2fi,
floatToFixedToStr as fl2str,
strToFixedToFloat as str2fl,
)
from fontTools.ttLib import TTLibError
from . import DefaultTable
import struct
import logging
log = logging.getLogger(__name__)
# Apple's documentation of 'avar':
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6avar.html
# sstruct format of the 'avar' table header (all fields big-endian).
AVAR_HEADER_FORMAT = """
> # big endian
majorVersion: H
minorVersion: H
reserved: H
axisCount: H
"""
# Sanity check: the header must pack to exactly 8 bytes.
assert sstruct.calcsize(AVAR_HEADER_FORMAT) == 8, sstruct.calcsize(AVAR_HEADER_FORMAT)
class table__a_v_a_r(DefaultTable.DefaultTable):
    """Axis Variations ('avar') table.

    Holds, per 'fvar' axis, a piecewise-linear segment map from user
    coordinates to normalized coordinates in ``self.segments``.
    """

    dependencies = ["fvar"]

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        # axisTag -> {fromCoordinate: toCoordinate}
        self.segments = {}

    def compile(self, ttFont):
        """Serialize the table; axis order follows the 'fvar' table."""
        tags = [axis.axisTag for axis in ttFont["fvar"].axes]
        chunks = [
            sstruct.pack(
                AVAR_HEADER_FORMAT,
                {
                    "majorVersion": 1,
                    "minorVersion": 0,
                    "reserved": 0,
                    "axisCount": len(tags),
                },
            )
        ]
        for tag in tags:
            pairs = sorted(self.segments[tag].items())
            chunks.append(struct.pack(">H", len(pairs)))
            # Coordinates are stored as F2Dot14 fixed-point values.
            chunks.extend(
                struct.pack(">hh", fl2fi(src, 14), fl2fi(dst, 14))
                for src, dst in pairs
            )
        return bytesjoin(chunks)

    def decompile(self, data, ttFont):
        """Parse binary 'avar' data into ``self.segments``."""
        tags = [axis.axisTag for axis in ttFont["fvar"].axes]
        headerSize = sstruct.calcsize(AVAR_HEADER_FORMAT)
        header = sstruct.unpack(AVAR_HEADER_FORMAT, data[0:headerSize])
        if header["majorVersion"] != 1:
            raise TTLibError(
                "unsupported 'avar' version %d" % header["majorVersion"])
        pos = headerSize
        for tag in tags:
            mapping = self.segments[tag] = {}
            numPairs = struct.unpack(">H", data[pos:pos + 2])[0]
            pos = pos + 2
            for _ in range(numPairs):
                src, dst = struct.unpack(">hh", data[pos:pos + 4])
                mapping[fi2fl(src, 14)] = fi2fl(dst, 14)
                pos = pos + 4

    def toXML(self, writer, ttFont):
        for axis in ttFont["fvar"].axes:
            tag = axis.axisTag
            writer.begintag("segment", axis=tag)
            writer.newline()
            for src, dst in sorted(self.segments[tag].items()):
                writer.simpletag(
                    "mapping",
                    **{"from": fl2str(src, 14), "to": fl2str(dst, 14)})
                writer.newline()
            writer.endtag("segment")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name != "segment":
            return
        axis = attrs["axis"]
        mapping = self.segments[axis] = {}
        for element in content:
            if not isinstance(element, tuple):
                continue
            childName, childAttrs, _ = element
            if childName != "mapping":
                continue
            src = str2fl(childAttrs["from"], 14)
            dst = str2fl(childAttrs["to"], 14)
            if src in mapping:
                log.warning("duplicate entry for %s in axis '%s'",
                            src, axis)
            mapping[src] = dst
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_a_v_a_r.py",
"copies": "5",
"size": "3673",
"license": "apache-2.0",
"hash": 4310089192650833000,
"line_mean": 35.73,
"line_max": 86,
"alpha_frac": 0.5662945821,
"autogenerated": false,
"ratio": 3.919957310565635,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6986251892665635,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import bytesjoin
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import (
fixedToFloat as fi2fl,
floatToFixed as fl2fi,
floatToFixedToStr as fl2str,
strToFixedToFloat as str2fl,
)
from fontTools.misc.textTools import safeEval
from fontTools.ttLib import TTLibError
from . import DefaultTable
import struct
from collections.abc import MutableMapping
# Apple's documentation of 'trak':
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6trak.html
# sstruct format of the fixed 'trak' table header.
TRAK_HEADER_FORMAT = """
> # big endian
version: 16.16F
format: H
horizOffset: H
vertOffset: H
reserved: H
"""
TRAK_HEADER_FORMAT_SIZE = sstruct.calcsize(TRAK_HEADER_FORMAT)
# Header of one TrackData subtable (one per direction).
TRACK_DATA_FORMAT = """
> # big endian
nTracks: H
nSizes: H
sizeTableOffset: L
"""
TRACK_DATA_FORMAT_SIZE = sstruct.calcsize(TRACK_DATA_FORMAT)
# One entry in the track table: track value, 'name' table index, and the
# offset (from the start of 'trak') of its per-size values.
TRACK_TABLE_ENTRY_FORMAT = """
> # big endian
track: 16.16F
nameIndex: H
offset: H
"""
TRACK_TABLE_ENTRY_FORMAT_SIZE = sstruct.calcsize(TRACK_TABLE_ENTRY_FORMAT)
# size values are actually '16.16F' fixed-point values, but here I do the
# fixedToFloat conversion manually instead of relying on sstruct
SIZE_VALUE_FORMAT = ">l"
SIZE_VALUE_FORMAT_SIZE = struct.calcsize(SIZE_VALUE_FORMAT)
# per-Size values are in 'FUnits', i.e. 16-bit signed integers
PER_SIZE_VALUE_FORMAT = ">h"
PER_SIZE_VALUE_FORMAT_SIZE = struct.calcsize(PER_SIZE_VALUE_FORMAT)
class table__t_r_a_k(DefaultTable.DefaultTable):
    """Apple tracking ('trak') table (AAT)."""

    dependencies = ['name']

    def compile(self, ttFont):
        """Serialize the table, longword-aligning each TrackData subtable."""
        subtables = []
        offset = TRAK_HEADER_FORMAT_SIZE
        for direction in ('horiz', 'vert'):
            offsetName = direction + 'Offset'
            trackData = getattr(self, direction + 'Data', TrackData())
            if not trackData:
                # Missing or empty data is encoded as a zero offset.
                setattr(self, offsetName, 0)
                continue
            # TrackData table format must be longword aligned: round the
            # running offset up to the next multiple of 4 and pad with NULs.
            aligned = (offset + 3) & ~3
            padding = b"\x00" * (aligned - offset)
            offset = aligned
            setattr(self, offsetName, offset)
            payload = trackData.compile(offset)
            offset += len(payload)
            subtables.append(padding + payload)
        self.reserved = 0
        return bytesjoin([sstruct.pack(TRAK_HEADER_FORMAT, self)] + subtables)

    def decompile(self, data, ttFont):
        """Parse the header, then each referenced TrackData subtable."""
        sstruct.unpack(TRAK_HEADER_FORMAT, data[:TRAK_HEADER_FORMAT_SIZE], self)
        for direction in ('horiz', 'vert'):
            trackData = TrackData()
            offset = getattr(self, direction + 'Offset')
            if offset:
                trackData.decompile(data, offset)
            setattr(self, direction + 'Data', trackData)

    def toXML(self, writer, ttFont):
        for attrName in ('version', 'format'):
            writer.simpletag(attrName, value=getattr(self, attrName))
            writer.newline()
        for direction in ('horiz', 'vert'):
            dataName = direction + 'Data'
            writer.begintag(dataName)
            writer.newline()
            getattr(self, dataName, TrackData()).toXML(writer, ttFont)
            writer.endtag(dataName)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name in ('version', 'format'):
            setattr(self, name, safeEval(attrs['value']))
        elif name in ('horizData', 'vertData'):
            trackData = TrackData()
            setattr(self, name, trackData)
            for element in content:
                if not isinstance(element, tuple):
                    continue
                childName, childAttrs, childContent = element
                trackData.fromXML(childName, childAttrs, childContent, ttFont)
class TrackData(MutableMapping):
    """Mapping of track value (float) -> TrackTableEntry.

    Holds the tracking data for one direction ('horiz' or 'vert') of the
    'trak' table and implements the full MutableMapping protocol.
    """

    def __init__(self, initialdata=None):
        # The previous signature used a mutable default argument ({});
        # a None sentinel is backward-compatible (the argument was always
        # copied) and avoids the shared-mutable-default pitfall.
        self._map = dict(initialdata) if initialdata is not None else {}

    def compile(self, offset):
        """Serialize this TrackData.

        *offset* is the position of this subtable from the start of the
        'trak' table; it is needed because internal offsets are absolute.
        """
        nTracks = len(self)
        sizes = self.sizes()
        nSizes = len(sizes)
        # offset to the start of the size subtable
        offset += TRACK_DATA_FORMAT_SIZE + TRACK_TABLE_ENTRY_FORMAT_SIZE*nTracks
        trackDataHeader = sstruct.pack(
            TRACK_DATA_FORMAT,
            {'nTracks': nTracks, 'nSizes': nSizes, 'sizeTableOffset': offset})
        entryDataList = []
        perSizeDataList = []
        # offset to the first run of per-size tracking values
        offset += SIZE_VALUE_FORMAT_SIZE*nSizes
        # sort track table entries by track value
        for track, entry in sorted(self.items()):
            assert entry.nameIndex is not None
            entry.track = track
            entry.offset = offset
            entryDataList += [sstruct.pack(TRACK_TABLE_ENTRY_FORMAT, entry)]
            # sort per-size values by size
            for size, value in sorted(entry.items()):
                perSizeDataList += [struct.pack(PER_SIZE_VALUE_FORMAT, value)]
            offset += PER_SIZE_VALUE_FORMAT_SIZE*nSizes
        # sort size values
        sizeDataList = [struct.pack(SIZE_VALUE_FORMAT, fl2fi(sv, 16)) for sv in sorted(sizes)]
        data = bytesjoin([trackDataHeader] + entryDataList + sizeDataList + perSizeDataList)
        return data

    def decompile(self, data, offset):
        """Parse one TrackData subtable.

        *data* is the whole 'trak' table; *offset* is where this TrackData
        starts (offsets inside the subtable are absolute within 'trak').
        """
        trackDataHeader = data[offset:offset+TRACK_DATA_FORMAT_SIZE]
        if len(trackDataHeader) != TRACK_DATA_FORMAT_SIZE:
            raise TTLibError('not enough data to decompile TrackData header')
        sstruct.unpack(TRACK_DATA_FORMAT, trackDataHeader, self)
        offset += TRACK_DATA_FORMAT_SIZE
        nSizes = self.nSizes
        sizeTableOffset = self.sizeTableOffset
        sizeTable = []
        # Read the shared size subtable ('16.16F' fixed-point point sizes).
        for i in range(nSizes):
            sizeValueData = data[sizeTableOffset:sizeTableOffset+SIZE_VALUE_FORMAT_SIZE]
            if len(sizeValueData) < SIZE_VALUE_FORMAT_SIZE:
                raise TTLibError('not enough data to decompile TrackData size subtable')
            sizeValue, = struct.unpack(SIZE_VALUE_FORMAT, sizeValueData)
            sizeTable.append(fi2fl(sizeValue, 16))
            sizeTableOffset += SIZE_VALUE_FORMAT_SIZE
        # Read each track entry and its run of per-size values.
        for i in range(self.nTracks):
            entry = TrackTableEntry()
            entryData = data[offset:offset+TRACK_TABLE_ENTRY_FORMAT_SIZE]
            if len(entryData) < TRACK_TABLE_ENTRY_FORMAT_SIZE:
                raise TTLibError('not enough data to decompile TrackTableEntry record')
            sstruct.unpack(TRACK_TABLE_ENTRY_FORMAT, entryData, entry)
            perSizeOffset = entry.offset
            for j in range(nSizes):
                size = sizeTable[j]
                perSizeValueData = data[perSizeOffset:perSizeOffset+PER_SIZE_VALUE_FORMAT_SIZE]
                if len(perSizeValueData) < PER_SIZE_VALUE_FORMAT_SIZE:
                    raise TTLibError('not enough data to decompile per-size track values')
                perSizeValue, = struct.unpack(PER_SIZE_VALUE_FORMAT, perSizeValueData)
                entry[size] = perSizeValue
                perSizeOffset += PER_SIZE_VALUE_FORMAT_SIZE
            self[entry.track] = entry
            offset += TRACK_TABLE_ENTRY_FORMAT_SIZE

    def toXML(self, writer, ttFont):
        nTracks = len(self)
        nSizes = len(self.sizes())
        writer.comment("nTracks=%d, nSizes=%d" % (nTracks, nSizes))
        writer.newline()
        for track, entry in sorted(self.items()):
            assert entry.nameIndex is not None
            entry.track = track
            entry.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        if name != 'trackEntry':
            return
        entry = TrackTableEntry()
        entry.fromXML(name, attrs, content, ttFont)
        self[entry.track] = entry

    def sizes(self):
        """Return the (shared) set of point sizes used by all entries.

        Raises TTLibError if the entries disagree on the size list.
        """
        if not self:
            return frozenset()
        tracks = list(self.tracks())
        sizes = self[tracks.pop(0)].sizes()
        for track in tracks:
            entrySizes = self[track].sizes()
            if sizes != entrySizes:
                raise TTLibError(
                    "'trak' table entries must specify the same sizes: "
                    "%s != %s" % (sorted(sizes), sorted(entrySizes)))
        return frozenset(sizes)

    def __getitem__(self, track):
        return self._map[track]

    def __delitem__(self, track):
        del self._map[track]

    def __setitem__(self, track, entry):
        self._map[track] = entry

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        return iter(self._map)

    def keys(self):
        return self._map.keys()

    # Alias: the keys of this mapping are track values.
    tracks = keys

    def __repr__(self):
        return "TrackData({})".format(self._map if self else "")
class TrackTableEntry(MutableMapping):
    """One track entry: maps point size (float) -> per-size tracking value.

    ``nameIndex`` refers to an entry in the font's 'name' table; ``track``
    is assigned by the owning TrackData.
    """

    def __init__(self, values=None, nameIndex=None):
        # The previous signature used a mutable default argument ({});
        # a None sentinel is backward-compatible (the argument was always
        # copied) and avoids the shared-mutable-default pitfall.
        self.nameIndex = nameIndex
        self._map = dict(values) if values is not None else {}

    def toXML(self, writer, ttFont):
        name = ttFont["name"].getDebugName(self.nameIndex)
        writer.begintag(
            "trackEntry",
            (('value', fl2str(self.track, 16)), ('nameIndex', self.nameIndex)))
        writer.newline()
        if name:
            # Emit the resolved 'name' record as a comment for readability.
            writer.comment(name)
            writer.newline()
        for size, perSizeValue in sorted(self.items()):
            writer.simpletag("track", size=fl2str(size, 16), value=perSizeValue)
            writer.newline()
        writer.endtag("trackEntry")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.track = str2fl(attrs['value'], 16)
        self.nameIndex = safeEval(attrs['nameIndex'])
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, _ = element
            if name != 'track':
                continue
            size = str2fl(attrs['size'], 16)
            self[size] = safeEval(attrs['value'])

    def __getitem__(self, size):
        return self._map[size]

    def __delitem__(self, size):
        del self._map[size]

    def __setitem__(self, size, value):
        self._map[size] = value

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        return iter(self._map)

    def keys(self):
        return self._map.keys()

    # Alias: the keys of this mapping are point sizes.
    sizes = keys

    def __repr__(self):
        return "TrackTableEntry({}, nameIndex={})".format(self._map, self.nameIndex)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.nameIndex == other.nameIndex and dict(self) == dict(other)

    def __ne__(self, other):
        result = self.__eq__(other)
        return result if result is NotImplemented else not result
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_t_r_a_k.py",
"copies": "5",
"size": "9352",
"license": "apache-2.0",
"hash": 5388250482087040000,
"line_mean": 28.6888888889,
"line_max": 88,
"alpha_frac": 0.6989948674,
"autogenerated": false,
"ratio": 3.114219114219114,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.025497902223865776,
"num_lines": 315
} |
from fontTools.misc.py23 import bytesjoin
from fontTools.misc import sstruct
from . import DefaultTable
from fontTools.misc.textTools import safeEval
from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
import struct
import itertools
from collections import deque
import logging
log = logging.getLogger(__name__)
# sstruct format of the 8-byte EBLC table header.
eblcHeaderFormat = """
> # big endian
version: 16.16F
numSizes: I
"""
# The table format string is split to handle sbitLineMetrics simply.
bitmapSizeTableFormatPart1 = """
> # big endian
indexSubTableArrayOffset: I
indexTablesSize: I
numberOfIndexSubTables: I
colorRef: I
"""
# The compound type for hori and vert.
sbitLineMetricsFormat = """
> # big endian
ascender: b
descender: b
widthMax: B
caretSlopeNumerator: b
caretSlopeDenominator: b
caretOffset: b
minOriginSB: b
minAdvanceSB: b
maxBeforeBL: b
minAfterBL: b
pad1: b
pad2: b
"""
# hori and vert go between the two parts.
bitmapSizeTableFormatPart2 = """
> # big endian
startGlyphIndex: H
endGlyphIndex: H
ppemX: B
ppemY: B
bitDepth: B
flags: b
"""
# Raw struct formats (and their sizes) for the small fixed-size records.
indexSubTableArrayFormat = ">HHL"
indexSubTableArraySize = struct.calcsize(indexSubTableArrayFormat)
indexSubHeaderFormat = ">HHL"
indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat)
codeOffsetPairFormat = ">HH"
codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat)
class table_E_B_L_C_(DefaultTable.DefaultTable):
    """Embedded Bitmap Location ('EBLC') table.

    Stores, per strike (size), a bitmapSizeTable plus the index subtables
    that locate glyph bitmap data in the companion 'EBDT' table.
    """

    dependencies = ['EBDT']

    # This method can be overridden in subclasses to support new formats
    # without changing the other implementation. Also can be used as a
    # convenience method for coverting a font file to an alternative format.
    def getIndexFormatClass(self, indexFormat):
        # Raises KeyError for index formats without a registered class.
        return eblc_sub_table_classes[indexFormat]

    def decompile(self, data, ttFont):
        # Save the original data because offsets are from the start of the table.
        origData = data
        i = 0;
        dummy = sstruct.unpack(eblcHeaderFormat, data[:8], self)
        i += 8;
        self.strikes = []
        # First pass: read the consecutive fixed-size bitmapSizeTable records
        # (part1 + hori/vert sbitLineMetrics + part2 = 16 + 2*12 + 8 bytes).
        for curStrikeIndex in range(self.numSizes):
            curStrike = Strike()
            self.strikes.append(curStrike)
            curTable = curStrike.bitmapSizeTable
            dummy = sstruct.unpack2(bitmapSizeTableFormatPart1, data[i:i+16], curTable)
            i += 16
            for metric in ('hori', 'vert'):
                metricObj = SbitLineMetrics()
                vars(curTable)[metric] = metricObj
                dummy = sstruct.unpack2(sbitLineMetricsFormat, data[i:i+12], metricObj)
                i += 12
            dummy = sstruct.unpack(bitmapSizeTableFormatPart2, data[i:i+8], curTable)
            i += 8
        # Second pass: follow each strike's offsets to its indexSubTableArray
        # records and from those to the index subtables themselves.
        for curStrike in self.strikes:
            curTable = curStrike.bitmapSizeTable
            for subtableIndex in range(curTable.numberOfIndexSubTables):
                i = curTable.indexSubTableArrayOffset + subtableIndex * indexSubTableArraySize
                tup = struct.unpack(indexSubTableArrayFormat, data[i:i+indexSubTableArraySize])
                (firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup
                # The additional offset is relative to the start of the array.
                i = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable
                tup = struct.unpack(indexSubHeaderFormat, data[i:i+indexSubHeaderSize])
                (indexFormat, imageFormat, imageDataOffset) = tup
                indexFormatClass = self.getIndexFormatClass(indexFormat)
                # The subtable receives only its payload (header stripped).
                indexSubTable = indexFormatClass(data[i+indexSubHeaderSize:], ttFont)
                indexSubTable.firstGlyphIndex = firstGlyphIndex
                indexSubTable.lastGlyphIndex = lastGlyphIndex
                indexSubTable.additionalOffsetToIndexSubtable = additionalOffsetToIndexSubtable
                indexSubTable.indexFormat = indexFormat
                indexSubTable.imageFormat = imageFormat
                indexSubTable.imageDataOffset = imageDataOffset
                indexSubTable.decompile() # https://github.com/fonttools/fonttools/issues/317
                curStrike.indexSubTables.append(indexSubTable)

    def compile(self, ttFont):
        dataList = []
        self.numSizes = len(self.strikes)
        dataList.append(sstruct.pack(eblcHeaderFormat, self))
        # Data size of the header + bitmapSizeTable needs to be calculated
        # in order to form offsets. This value will hold the size of the data
        # in dataList after all the data is consolidated in dataList.
        dataSize = len(dataList[0])
        # The table will be structured in the following order:
        # (0) header
        # (1) Each bitmapSizeTable [1 ... self.numSizes]
        # (2) Alternate between indexSubTableArray and indexSubTable
        #     for each bitmapSizeTable present.
        #
        # The issue is maintaining the proper offsets when table information
        # gets moved around. All offsets and size information must be recalculated
        # when building the table to allow editing within ttLib and also allow easy
        # import/export to and from XML. All of this offset information is lost
        # when exporting to XML so everything must be calculated fresh so importing
        # from XML will work cleanly. Only byte offset and size information is
        # calculated fresh. Count information like numberOfIndexSubTables is
        # checked through assertions. If the information in this table was not
        # touched or was changed properly then these types of values should match.
        #
        # The table will be rebuilt the following way:
        # (0) Precompute the size of all the bitmapSizeTables. This is needed to
        #     compute the offsets properly.
        # (1) For each bitmapSizeTable compute the indexSubTable and
        #     indexSubTableArray pair. The indexSubTable must be computed first
        #     so that the offset information in indexSubTableArray can be
        #     calculated. Update the data size after each pairing.
        # (2) Build each bitmapSizeTable.
        # (3) Consolidate all the data into the main dataList in the correct order.
        for _ in self.strikes:
            dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1)
            dataSize += len(('hori', 'vert')) * sstruct.calcsize(sbitLineMetricsFormat)
            dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2)
        indexSubTablePairDataList = []
        for curStrike in self.strikes:
            curTable = curStrike.bitmapSizeTable
            curTable.numberOfIndexSubTables = len(curStrike.indexSubTables)
            curTable.indexSubTableArrayOffset = dataSize
            # Precompute the size of the indexSubTableArray. This information
            # is important for correctly calculating the new value for
            # additionalOffsetToIndexSubtable.
            sizeOfSubTableArray = curTable.numberOfIndexSubTables * indexSubTableArraySize
            lowerBound = dataSize
            dataSize += sizeOfSubTableArray
            upperBound = dataSize
            indexSubTableDataList = []
            for indexSubTable in curStrike.indexSubTables:
                indexSubTable.additionalOffsetToIndexSubtable = dataSize - curTable.indexSubTableArrayOffset
                glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names))
                indexSubTable.firstGlyphIndex = min(glyphIds)
                indexSubTable.lastGlyphIndex = max(glyphIds)
                data = indexSubTable.compile(ttFont)
                indexSubTableDataList.append(data)
                dataSize += len(data)
            # Glyph range of the strike covers all its subtables.
            curTable.startGlyphIndex = min(ist.firstGlyphIndex for ist in curStrike.indexSubTables)
            curTable.endGlyphIndex = max(ist.lastGlyphIndex for ist in curStrike.indexSubTables)
            for i in curStrike.indexSubTables:
                data = struct.pack(indexSubHeaderFormat, i.firstGlyphIndex, i.lastGlyphIndex, i.additionalOffsetToIndexSubtable)
                indexSubTablePairDataList.append(data)
            indexSubTablePairDataList.extend(indexSubTableDataList)
            curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset
        # Step (2)-(3): now that offsets are final, emit the size tables and
        # append the array/subtable data built above.
        for curStrike in self.strikes:
            curTable = curStrike.bitmapSizeTable
            data = sstruct.pack(bitmapSizeTableFormatPart1, curTable)
            dataList.append(data)
            for metric in ('hori', 'vert'):
                metricObj = vars(curTable)[metric]
                data = sstruct.pack(sbitLineMetricsFormat, metricObj)
                dataList.append(data)
            data = sstruct.pack(bitmapSizeTableFormatPart2, curTable)
            dataList.append(data)
        dataList.extend(indexSubTablePairDataList)
        return bytesjoin(dataList)

    def toXML(self, writer, ttFont):
        writer.simpletag('header', [('version', self.version)])
        writer.newline()
        for curIndex, curStrike in enumerate(self.strikes):
            curStrike.toXML(curIndex, writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        if name == 'header':
            self.version = safeEval(attrs['version'])
        elif name == 'strike':
            if not hasattr(self, 'strikes'):
                self.strikes = []
            strikeIndex = safeEval(attrs['index'])
            curStrike = Strike()
            curStrike.fromXML(name, attrs, content, ttFont, self)
            # Grow the strike array to the appropriate size. The XML format
            # allows for the strike index value to be out of order.
            if strikeIndex >= len(self.strikes):
                self.strikes += [None] * (strikeIndex + 1 - len(self.strikes))
            assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices."
            self.strikes[strikeIndex] = curStrike
class Strike(object):
    """One EBLC strike: a bitmapSizeTable plus its index subtables."""

    def __init__(self):
        self.bitmapSizeTable = BitmapSizeTable()
        self.indexSubTables = []

    def toXML(self, strikeIndex, writer, ttFont):
        writer.begintag('strike', [('index', strikeIndex)])
        writer.newline()
        self.bitmapSizeTable.toXML(writer, ttFont)
        writer.comment('GlyphIds are written but not read. The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler.')
        writer.newline()
        for subTable in self.indexSubTables:
            subTable.toXML(writer, ttFont)
        writer.endtag('strike')
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont, locator):
        for element in content:
            if not isinstance(element, tuple):
                continue
            childName, childAttrs, childContent = element
            if childName == 'bitmapSizeTable':
                self.bitmapSizeTable.fromXML(childName, childAttrs, childContent, ttFont)
            elif childName.startswith(_indexSubTableSubclassPrefix):
                # The subtable format number is encoded in the element name.
                indexFormat = safeEval(childName[len(_indexSubTableSubclassPrefix):])
                subTableClass = locator.getIndexFormatClass(indexFormat)
                subTable = subTableClass(None, None)
                subTable.indexFormat = indexFormat
                subTable.fromXML(childName, childAttrs, childContent, ttFont)
                self.indexSubTables.append(subTable)
class BitmapSizeTable(object):
    """EBLC bitmapSizeTable record: per-strike metrics and bookkeeping."""

    # Returns all the simple metric names that bitmap size table
    # cares about in terms of XML creation.
    def _getXMLMetricNames(self):
        fieldNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1]
        fieldNames = fieldNames + sstruct.getformat(bitmapSizeTableFormatPart2)[1]
        # Skip the first 3 data names because they are byte offsets and counts.
        return fieldNames[3:]

    def toXML(self, writer, ttFont):
        writer.begintag('bitmapSizeTable')
        writer.newline()
        for direction in ('hori', 'vert'):
            getattr(self, direction).toXML(direction, writer, ttFont)
        for metricName in self._getXMLMetricNames():
            writer.simpletag(metricName, value=getattr(self, metricName))
            writer.newline()
        writer.endtag('bitmapSizeTable')
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Only names known to the bitmap size table are read back in.
        knownNames = set(self._getXMLMetricNames())
        for element in content:
            if not isinstance(element, tuple):
                continue
            childName, childAttrs, childContent = element
            if childName == 'sbitLineMetrics':
                direction = childAttrs['direction']
                assert direction in ('hori', 'vert'), "SbitLineMetrics direction specified invalid."
                metricObj = SbitLineMetrics()
                metricObj.fromXML(childName, childAttrs, childContent, ttFont)
                vars(self)[direction] = metricObj
            elif childName in knownNames:
                vars(self)[childName] = safeEval(childAttrs['value'])
            else:
                log.warning("unknown name '%s' being ignored in BitmapSizeTable.", childName)
class SbitLineMetrics(object):
    """The sbitLineMetrics compound record ('hori' or 'vert')."""

    def toXML(self, name, writer, ttFont):
        writer.begintag('sbitLineMetrics', [('direction', name)])
        writer.newline()
        for metricName in sstruct.getformat(sbitLineMetricsFormat)[1]:
            writer.simpletag(metricName, value=getattr(self, metricName))
            writer.newline()
        writer.endtag('sbitLineMetrics')
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        knownNames = set(sstruct.getformat(sbitLineMetricsFormat)[1])
        for element in content:
            if not isinstance(element, tuple):
                continue
            childName, childAttrs, _ = element
            if childName in knownNames:
                vars(self)[childName] = safeEval(childAttrs['value'])
# Important information about the naming scheme. Used for identifying subtables.
_indexSubTableSubclassPrefix = 'eblc_index_sub_table_'
class EblcIndexSubTable(object):
    """Base class for EBLC index subtables.

    Instances hold the raw subtable payload and are decompiled lazily on
    first access to a missing attribute (see __getattr__).
    """

    def __init__(self, data, ttFont):
        # Raw bytes after the indexSubHeader, and the owning font; both are
        # deleted by the subclasses' decompile() once parsing is done.
        self.data = data
        self.ttFont = ttFont
        # TODO Currently non-lazy decompiling doesn't work for this class...
        #if not ttFont.lazy:
        #	self.decompile()
        #	del self.data, self.ttFont

    def __getattr__(self, attr):
        # Allow lazy decompile.
        if attr[:2] == '__':
            raise AttributeError(attr)
        if not hasattr(self, "data"):
            raise AttributeError(attr)
        # NOTE(review): after decompile() deletes 'data', a lookup of a truly
        # missing attribute would re-enter here via hasattr — presumably this
        # never happens for well-formed use; confirm before relying on it.
        self.decompile()
        return getattr(self, attr)

    # This method just takes care of the indexSubHeader. Implementing subclasses
    # should call it to compile the indexSubHeader and then continue compiling
    # the remainder of their unique format.
    def compile(self, ttFont):
        return struct.pack(indexSubHeaderFormat, self.indexFormat, self.imageFormat, self.imageDataOffset)

    # Creates the XML for bitmap glyphs. Each index sub table basically makes
    # the same XML except for specific metric information that is written
    # out via a method call that a subclass implements optionally.
    def toXML(self, writer, ttFont):
        writer.begintag(self.__class__.__name__, [
                ('imageFormat', self.imageFormat),
                ('firstGlyphIndex', self.firstGlyphIndex),
                ('lastGlyphIndex', self.lastGlyphIndex),
                ])
        writer.newline()
        self.writeMetrics(writer, ttFont)
        # Write out the names as thats all thats needed to rebuild etc.
        # For font debugging of consecutive formats the ids are also written.
        # The ids are not read when moving from the XML format.
        glyphIds = map(ttFont.getGlyphID, self.names)
        for glyphName, glyphId in zip(self.names, glyphIds):
            writer.simpletag('glyphLoc', name=glyphName, id=glyphId)
            writer.newline()
        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Read all the attributes. Even though the glyph indices are
        # recalculated, they are still read in case there needs to
        # be an immediate export of the data.
        self.imageFormat = safeEval(attrs['imageFormat'])
        self.firstGlyphIndex = safeEval(attrs['firstGlyphIndex'])
        self.lastGlyphIndex = safeEval(attrs['lastGlyphIndex'])
        self.readMetrics(name, attrs, content, ttFont)
        self.names = []
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == 'glyphLoc':
                self.names.append(attrs['name'])

    # A helper method that writes the metrics for the index sub table. It also
    # is responsible for writing the image size for fixed size data since fixed
    # size is not recalculated on compile. Default behavior is to do nothing.
    def writeMetrics(self, writer, ttFont):
        pass

    # A helper method that is the inverse of writeMetrics.
    def readMetrics(self, name, attrs, content, ttFont):
        pass

    # This method is for fixed glyph data sizes. There are formats where
    # the glyph data is fixed but are actually composite glyphs. To handle
    # this the font spec in indexSubTable makes the data the size of the
    # fixed size by padding the component arrays. This function abstracts
    # out this padding process. Input is data unpadded. Output is data
    # padded only in fixed formats. Default behavior is to return the data.
    def padBitmapData(self, data):
        return data

    # Remove any of the glyph locations and names that are flagged as skipped.
    # This only occurs in formats {1,3}.
    def removeSkipGlyphs(self):
        # Determines if a name, location pair is a valid data location.
        # Skip glyphs are marked when the size is equal to zero.
        def isValidLocation(args):
            (name, (startByte, endByte)) = args
            return startByte < endByte
        # Remove all skip glyphs.
        dataPairs = list(filter(isValidLocation, zip(self.names, self.locations)))
        self.names, self.locations = list(map(list, zip(*dataPairs)))
# A closure for creating a custom mixin. This is done because formats 1 and 3
# are very similar. The only difference between them is the size per offset
# value. Code put in here should handle both cases generally.
def _createOffsetArrayIndexSubTableMixin(formatStringForDataType):
# Prep the data size for the offset array data format.
dataFormat = '>'+formatStringForDataType
offsetDataSize = struct.calcsize(dataFormat)
class OffsetArrayIndexSubTableMixin(object):
def decompile(self):
numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1
indexingOffsets = [glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs+2)]
indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
offsetArray = [struct.unpack(dataFormat, self.data[slice(*loc)])[0] for loc in indexingLocations]
glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray]
self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
self.removeSkipGlyphs()
del self.data, self.ttFont
def compile(self, ttFont):
# First make sure that all the data lines up properly. Formats 1 and 3
# must have all its data lined up consecutively. If not this will fail.
for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable offset formats"
glyphIds = list(map(ttFont.getGlyphID, self.names))
# Make sure that all ids are sorted strictly increasing.
assert all(glyphIds[i] < glyphIds[i+1] for i in range(len(glyphIds)-1))
# Run a simple algorithm to add skip glyphs to the data locations at
# the places where an id is not present.
idQueue = deque(glyphIds)
locQueue = deque(self.locations)
allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
allLocations = []
for curId in allGlyphIds:
if curId != idQueue[0]:
allLocations.append((locQueue[0][0], locQueue[0][0]))
else:
idQueue.popleft()
allLocations.append(locQueue.popleft())
# Now that all the locations are collected, pack them appropriately into
# offsets. This is the form where offset[i] is the location and
# offset[i+1]-offset[i] is the size of the data location.
offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]]
# Image data offset must be less than or equal to the minimum of locations.
# This offset may change the value for round tripping but is safer and
# allows imageDataOffset to not be required to be in the XML version.
self.imageDataOffset = min(offsets)
offsetArray = [offset - self.imageDataOffset for offset in offsets]
dataList = [EblcIndexSubTable.compile(self, ttFont)]
dataList += [struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray]
# Take care of any padding issues. Only occurs in format 3.
if offsetDataSize * len(offsetArray) % 4 != 0:
dataList.append(struct.pack(dataFormat, 0))
return bytesjoin(dataList)
return OffsetArrayIndexSubTableMixin
# A Mixin for functionality shared between the different kinds
# of fixed sized data handling. Both kinds have big metrics so
# that kind of special processing is also handled in this mixin.
class FixedSizeIndexSubTableMixin(object):
    """Shared behaviour for index formats whose glyph data has a fixed size."""

    def writeMetrics(self, writer, ttFont):
        # imageSize is written explicitly because it is not recalculated
        # when compiling fixed-size formats.
        writer.simpletag('imageSize', value=self.imageSize)
        writer.newline()
        self.metrics.toXML(writer, ttFont)

    def readMetrics(self, name, attrs, content, ttFont):
        for element in content:
            if not isinstance(element, tuple):
                continue
            childName, childAttrs, childContent = element
            if childName == 'imageSize':
                self.imageSize = safeEval(childAttrs['value'])
            elif childName == BigGlyphMetrics.__name__:
                self.metrics = BigGlyphMetrics()
                self.metrics.fromXML(childName, childAttrs, childContent, ttFont)
            elif childName == SmallGlyphMetrics.__name__:
                log.warning("SmallGlyphMetrics being ignored in format %d.", self.indexFormat)

    def padBitmapData(self, data):
        # Make sure that the data isn't bigger than the fixed size.
        assert len(data) <= self.imageSize, "Data in indexSubTable format %d must be less than the fixed size." % self.indexFormat
        # Zero-pad the data out to exactly imageSize bytes.
        return data + b'\0' * (self.imageSize - len(data))
# Format 1: ulong (4-byte) offset array; all behaviour comes from the mixin.
class eblc_index_sub_table_1(_createOffsetArrayIndexSubTableMixin('L'), EblcIndexSubTable):
    pass
class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
    """Index format 2: consecutive glyph ids with fixed-size image data."""

    def decompile(self):
        (self.imageSize,) = struct.unpack(">L", self.data[:4])
        self.metrics = BigGlyphMetrics()
        sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics)
        glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
        # Glyph images are laid out back to back, each imageSize bytes long.
        offsets = [
            self.imageSize * i + self.imageDataOffset
            for i in range(len(glyphIds) + 1)
        ]
        self.locations = list(zip(offsets, offsets[1:]))
        self.names = list(map(self.ttFont.getGlyphName, glyphIds))
        del self.data, self.ttFont

    def compile(self, ttFont):
        glyphIds = list(map(ttFont.getGlyphID, self.names))
        # Format 2 stores no per-glyph offsets, so the ids must form an
        # unbroken run between firstGlyphIndex and lastGlyphIndex.
        assert glyphIds == list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)), "Format 2 ids must be consecutive."
        self.imageDataOffset = min(next(iter(zip(*self.locations))))
        pieces = [EblcIndexSubTable.compile(self, ttFont)]
        pieces.append(struct.pack(">L", self.imageSize))
        pieces.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
        return bytesjoin(pieces)
# Index format 3: same layout as format 1 but with 16-bit ("H") offsets,
# which is why the shared offset-array mixin has to handle 4-byte padding.
class eblc_index_sub_table_3(_createOffsetArrayIndexSubTableMixin('H'), EblcIndexSubTable):
    pass
class eblc_index_sub_table_4(EblcIndexSubTable):
    """Index format 4: a sparse list of (glyphID, offset) pairs, with one
    trailing sentinel pair whose offset closes the last glyph's data range."""

    def decompile(self):
        (numGlyphs,) = struct.unpack(">L", self.data[:4])
        pairData = self.data[4:]
        # numGlyphs + 1 pairs are stored: the extra pair is the sentinel
        # that terminates the final glyph's data range.
        pairs = []
        for index in range(numGlyphs + 1):
            begin = index * codeOffsetPairSize
            pairs.append(struct.unpack(codeOffsetPairFormat,
                                       pairData[begin:begin + codeOffsetPairSize]))
        glyphIds = [pair[0] for pair in pairs]
        offsets = [pair[1] + self.imageDataOffset for pair in pairs]
        # The sentinel contributes an offset but no real glyph id.
        glyphIds.pop()
        self.locations = list(zip(offsets, offsets[1:]))
        self.names = list(map(self.ttFont.getGlyphName, glyphIds))
        del self.data, self.ttFont

    def compile(self, ttFont):
        # Format 4 requires the glyph data to be stored back to back;
        # verify each range ends exactly where the next one begins.
        for left, right in zip(self.locations, self.locations[1:]):
            assert left[1] == right[0], "Data must be consecutive in indexSubTable format 4"
        offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]]
        # Image data offset must be less than or equal to the minimum of locations.
        # Resetting this offset may change the value for round tripping but is safer
        # and allows imageDataOffset to not be required to be in the XML version.
        self.imageDataOffset = min(offsets)
        offsets = [offset - self.imageDataOffset for offset in offsets]
        glyphIds = list(map(ttFont.getGlyphID, self.names))
        # One sentinel id (0) pads the id list so it pairs with the final,
        # terminating offset.
        paddedIds = glyphIds + [0]
        pieces = [EblcIndexSubTable.compile(self, ttFont),
                  struct.pack(">L", len(glyphIds))]
        pieces.extend(struct.pack(codeOffsetPairFormat, gid, off)
                      for gid, off in zip(paddedIds, offsets))
        return bytesjoin(pieces)
class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
    """Index format 5: fixed-size images for a sparse, explicitly listed set
    of glyph ids; shares imageSize/metrics handling with format 2."""

    def decompile(self):
        self.origDataLen = 0
        (self.imageSize,) = struct.unpack(">L", self.data[:4])
        remaining = self.data[4:]
        self.metrics, remaining = sstruct.unpack2(bigGlyphMetricsFormat, remaining, BigGlyphMetrics())
        (numGlyphs,) = struct.unpack(">L", remaining[:4])
        remaining = remaining[4:]
        glyphIds = [struct.unpack(">H", remaining[2 * i:2 * (i + 1)])[0]
                    for i in range(numGlyphs)]
        # Fixed-size data: the i-th listed glyph lives at
        # imageDataOffset + i * imageSize.
        edges = [self.imageDataOffset + self.imageSize * i
                 for i in range(len(glyphIds) + 1)]
        self.locations = list(zip(edges, edges[1:]))
        self.names = list(map(self.ttFont.getGlyphName, glyphIds))
        del self.data, self.ttFont

    def compile(self, ttFont):
        self.imageDataOffset = min(next(iter(zip(*self.locations))))
        pieces = [EblcIndexSubTable.compile(self, ttFont),
                  struct.pack(">L", self.imageSize),
                  sstruct.pack(bigGlyphMetricsFormat, self.metrics)]
        glyphIds = list(map(ttFont.getGlyphID, self.names))
        pieces.append(struct.pack(">L", len(glyphIds)))
        pieces.extend(struct.pack(">H", gid) for gid in glyphIds)
        # Keep the uint16 glyph id array 32-bit aligned with a zero pad word.
        if len(glyphIds) % 2 == 1:
            pieces.append(struct.pack(">H", 0))
        return bytesjoin(pieces)
# Dictionary of indexFormat to the class representing that format.
# Looked up by the on-disk indexFormat value when decompiling; formats not
# listed here presumably fall through to caller-side handling — confirm
# against the subtable factory elsewhere in this module.
eblc_sub_table_classes = {
    1: eblc_index_sub_table_1,
    2: eblc_index_sub_table_2,
    3: eblc_index_sub_table_3,
    4: eblc_index_sub_table_4,
    5: eblc_index_sub_table_5,
}
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/E_B_L_C_.py",
"copies": "5",
"size": "25177",
"license": "apache-2.0",
"hash": 1645807917849416200,
"line_mean": 39.2188498403,
"line_max": 139,
"alpha_frac": 0.7388489494,
"autogenerated": false,
"ratio": 3.3794630872483222,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.024820498123886883,
"num_lines": 626
} |
from fontTools.misc.py23 import bytesjoin
from fontTools.misc.textTools import safeEval
from . import DefaultTable
import struct
class table_V_O_R_G_(DefaultTable.DefaultTable):
    """ This table is structured so that you can treat it like a dictionary keyed by glyph name.

    ttFont['VORG'][<glyphName>] will return the vertical origin for any glyph.
    ttFont['VORG'][<glyphName>] = <value> will set the vertical origin for any glyph.
    """

    def decompile(self, data, ttFont):
        """Parse the binary VORG header and vertical-origin records."""
        self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID
        self.majorVersion, self.minorVersion, self.defaultVertOriginY, self.numVertOriginYMetrics = struct.unpack(">HHhH", data[:8])
        assert (self.majorVersion <= 1), "Major version of VORG table is higher than I know how to handle"
        data = data[8:]
        vids = []
        gids = []
        pos = 0
        for i in range(self.numVertOriginYMetrics):
            gid, vOrigin = struct.unpack(">Hh", data[pos:pos+4])
            pos += 4
            gids.append(gid)
            vids.append(vOrigin)

        self.VOriginRecords = vOrig = {}
        glyphOrder = ttFont.getGlyphOrder()
        try:
            names = [glyphOrder[gid] for gid in gids]
        except IndexError:
            # A gid beyond the glyph order; fall back to per-gid name
            # resolution through the font.
            getGlyphName = self.getGlyphName
            names = map(getGlyphName, gids)

        for name, vid in zip(names, vids):
            vOrig[name] = vid

    def compile(self, ttFont):
        """Serialize the table; records are written in ascending GID order."""
        vorgs = list(self.VOriginRecords.values())
        names = list(self.VOriginRecords.keys())
        nameMap = ttFont.getReverseGlyphMap()
        try:
            gids = [nameMap[name] for name in names]
        except KeyError:
            # The cached reverse map may be stale; rebuild it once and retry.
            nameMap = ttFont.getReverseGlyphMap(rebuild=True)
            gids = [nameMap[name] for name in names]
        vOriginTable = list(zip(gids, vorgs))
        self.numVertOriginYMetrics = len(vorgs)
        vOriginTable.sort() # must be in ascending GID order
        dataList = [struct.pack(">Hh", rec[0], rec[1]) for rec in vOriginTable]
        header = struct.pack(">HHhH", self.majorVersion, self.minorVersion, self.defaultVertOriginY, self.numVertOriginYMetrics)
        dataList.insert(0, header)
        data = bytesjoin(dataList)
        return data

    def toXML(self, writer, ttFont):
        writer.simpletag("majorVersion", value=self.majorVersion)
        writer.newline()
        writer.simpletag("minorVersion", value=self.minorVersion)
        writer.newline()
        writer.simpletag("defaultVertOriginY", value=self.defaultVertOriginY)
        writer.newline()
        # NOTE(review): this writes the stored count, which may be stale if
        # records changed since decompile/compile — confirm before relying on it.
        writer.simpletag("numVertOriginYMetrics", value=self.numVertOriginYMetrics)
        writer.newline()
        vOriginTable = []
        glyphNames = self.VOriginRecords.keys()
        for glyphName in glyphNames:
            try:
                gid = ttFont.getGlyphID(glyphName)
            except Exception:
                # BUGFIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.
                assert 0, "VORG table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName)
            vOriginTable.append([gid, glyphName, self.VOriginRecords[glyphName]])
        vOriginTable.sort()
        for entry in vOriginTable:
            vOriginRec = VOriginRecord(entry[1], entry[2])
            vOriginRec.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        if not hasattr(self, "VOriginRecords"):
            self.VOriginRecords = {}
        self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID
        if name == "VOriginRecord":
            vOriginRec = VOriginRecord()
            for element in content:
                if isinstance(element, str):
                    continue
                name, attrs, content = element
                vOriginRec.fromXML(name, attrs, content, ttFont)
            self.VOriginRecords[vOriginRec.glyphName] = vOriginRec.vOrigin
        elif "value" in attrs:
            # Scalar header fields (majorVersion, defaultVertOriginY, ...).
            setattr(self, name, safeEval(attrs["value"]))

    def __getitem__(self, glyphSelector):
        if isinstance(glyphSelector, int):
            # its a gid, convert to glyph name
            glyphSelector = self.getGlyphName(glyphSelector)

        # Glyphs without an explicit record use the table-wide default.
        if glyphSelector not in self.VOriginRecords:
            return self.defaultVertOriginY

        return self.VOriginRecords[glyphSelector]

    def __setitem__(self, glyphSelector, value):
        if isinstance(glyphSelector, int):
            # its a gid, convert to glyph name
            glyphSelector = self.getGlyphName(glyphSelector)

        # Storing the default value is expressed by removing the record.
        if value != self.defaultVertOriginY:
            self.VOriginRecords[glyphSelector] = value
        elif glyphSelector in self.VOriginRecords:
            del self.VOriginRecords[glyphSelector]

    def __delitem__(self, glyphSelector):
        del self.VOriginRecords[glyphSelector]
class VOriginRecord(object):
    """A single (glyph name, vertical origin) pair for XML round-tripping."""

    def __init__(self, name=None, vOrigin=None):
        self.glyphName = name
        self.vOrigin = vOrigin

    def toXML(self, writer, ttFont):
        writer.begintag("VOriginRecord")
        writer.newline()
        # Emit the two fields in their canonical order.
        for tag, fieldValue in (("glyphName", self.glyphName), ("vOrigin", self.vOrigin)):
            writer.simpletag(tag, value=fieldValue)
            writer.newline()
        writer.endtag("VOriginRecord")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        raw = attrs["value"]
        # Glyph names stay strings; everything else is parsed as a literal.
        if name == "glyphName":
            setattr(self, name, raw)
        else:
            setattr(self, name, safeEval(raw))
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/V_O_R_G_.py",
"copies": "5",
"size": "4751",
"license": "apache-2.0",
"hash": 6862505940170168000,
"line_mean": 33.4275362319,
"line_max": 126,
"alpha_frac": 0.7297411071,
"autogenerated": false,
"ratio": 3.163115845539281,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6392856952639282,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import bytesjoin
from fontTools.misc.textTools import safeEval, readHex
from fontTools.misc.encodingTools import getEncoding
from fontTools.ttLib import getSearchRange
from fontTools.unicode import Unicode
from . import DefaultTable
import sys
import struct
import array
import logging
log = logging.getLogger(__name__)
def _make_map(font, chars, gids):
assert len(chars) == len(gids)
cmap = {}
glyphOrder = font.getGlyphOrder()
for char,gid in zip(chars,gids):
if gid == 0:
continue
try:
name = glyphOrder[gid]
except IndexError:
name = font.getGlyphName(gid)
cmap[char] = name
return cmap
class table__c_m_a_p(DefaultTable.DefaultTable):
    """The 'cmap' table: a list of character-to-glyph subtables, searchable
    by (platformID, platEncID)."""

    def getcmap(self, platformID, platEncID):
        """Return the first subtable matching (platformID, platEncID), or None."""
        for subtable in self.tables:
            if (subtable.platformID == platformID and
                    subtable.platEncID == platEncID):
                return subtable
        return None # not found

    def getBestCmap(self, cmapPreferences=((3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0))):
        """Return the 'best' unicode cmap dictionary available in the font,
        or None, if no unicode cmap subtable is available.

        By default it will search for the following (platformID, platEncID)
        pairs:
            (3, 10), (0, 6), (0, 4), (3, 1), (0, 3), (0, 2), (0, 1), (0, 0)
        This can be customized via the cmapPreferences argument.
        """
        for platformID, platEncID in cmapPreferences:
            cmapSubtable = self.getcmap(platformID, platEncID)
            if cmapSubtable is not None:
                return cmapSubtable.cmap
        return None # None of the requested cmap subtables were found

    def buildReversed(self):
        """Returns a reverse cmap such as {'one':{0x31}, 'A':{0x41,0x391}}.

        The values are sets of Unicode codepoints because
        some fonts map different codepoints to the same glyph.
        For example, U+0041 LATIN CAPITAL LETTER A and U+0391
        GREEK CAPITAL LETTER ALPHA are sometimes the same glyph.
        """
        result = {}
        for subtable in self.tables:
            if subtable.isUnicode():
                for codepoint, name in subtable.cmap.items():
                    result.setdefault(name, set()).add(codepoint)
        return result

    def decompile(self, data, ttFont):
        """Parse the cmap header and lazily decompile each subtable."""
        tableVersion, numSubTables = struct.unpack(">HH", data[:4])
        self.tableVersion = int(tableVersion)
        self.tables = tables = []
        seenOffsets = {}
        for i in range(numSubTables):
            platformID, platEncID, offset = struct.unpack(
                    ">HHl", data[4+i*8:4+(i+1)*8])
            platformID, platEncID = int(platformID), int(platEncID)
            format, length = struct.unpack(">HH", data[offset:offset+4])
            if format in [8,10,12,13]:
                format, reserved, length = struct.unpack(">HHL", data[offset:offset+8])
            elif format in [14]:
                format, length = struct.unpack(">HL", data[offset:offset+6])

            if not length:
                log.error(
                    "cmap subtable is reported as having zero length: platformID %s, "
                    "platEncID %s, format %s offset %s. Skipping table.",
                    platformID, platEncID, format, offset)
                continue
            table = CmapSubtable.newSubtable(format)
            table.platformID = platformID
            table.platEncID = platEncID
            # Note that by default we decompile only the subtable header info;
            # any other data gets decompiled only when an attribute of the
            # subtable is referenced.
            table.decompileHeader(data[offset:offset+int(length)], ttFont)
            if offset in seenOffsets:
                table.data = None # Mark as decompiled
                table.cmap = tables[seenOffsets[offset]].cmap
            else:
                # BUGFIX: index by the position this table will occupy in
                # `tables`, not by the loop counter `i`; the two diverge as
                # soon as a zero-length subtable has been skipped above,
                # which made shared-offset subtables alias the wrong entry
                # (or raise IndexError).
                seenOffsets[offset] = len(tables)
            tables.append(table)

    def compile(self, ttFont):
        """Serialize the table, de-duplicating identical subtable data."""
        self.tables.sort() # sort according to the spec; see CmapSubtable.__lt__()
        numSubTables = len(self.tables)
        totalOffset = 4 + 8 * numSubTables
        data = struct.pack(">HH", self.tableVersion, numSubTables)
        tableData = b""
        seen = {} # Some tables are the same object reference. Don't compile them twice.
        done = {} # Some tables are different objects, but compile to the same data chunk
        for table in self.tables:
            try:
                offset = seen[id(table.cmap)]
            except KeyError:
                chunk = table.compile(ttFont)
                if chunk in done:
                    offset = done[chunk]
                else:
                    offset = seen[id(table.cmap)] = done[chunk] = totalOffset + len(tableData)
                    tableData = tableData + chunk
            data = data + struct.pack(">HHl", table.platformID, table.platEncID, offset)
        return data + tableData

    def toXML(self, writer, ttFont):
        writer.simpletag("tableVersion", version=self.tableVersion)
        writer.newline()
        for table in self.tables:
            table.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        if name == "tableVersion":
            self.tableVersion = safeEval(attrs["version"])
            return
        # Subtable elements are named cmap_format_<N>; ignore anything else.
        if name[:12] != "cmap_format_":
            return
        if not hasattr(self, "tables"):
            self.tables = []
        format = safeEval(name[12:])
        table = CmapSubtable.newSubtable(format)
        table.platformID = safeEval(attrs["platformID"])
        table.platEncID = safeEval(attrs["platEncID"])
        table.fromXML(name, attrs, content, ttFont)
        self.tables.append(table)
class CmapSubtable(object):
    """Base class for all cmap subtable formats.

    Subtables are decompiled lazily: decompileHeader() stashes the raw
    payload on self.data, and the first access to a missing attribute
    (typically .cmap) triggers decompile() via __getattr__.
    """

    @staticmethod
    def getSubtableClass(format):
        """Return the subtable class for a format."""
        return cmap_classes.get(format, cmap_format_unknown)

    @staticmethod
    def newSubtable(format):
        """Return a new instance of a subtable for format."""
        subtableClass = CmapSubtable.getSubtableClass(format)
        return subtableClass(format)

    def __init__(self, format):
        self.format = format
        self.data = None    # raw payload until lazily decompiled
        self.ttFont = None  # set by decompileHeader, cleared by decompile

    def __getattr__(self, attr):
        # allow lazy decompilation of subtables.
        if attr[:2] == '__': # don't handle requests for member functions like '__lt__'
            raise AttributeError(attr)
        if self.data is None:
            raise AttributeError(attr)
        self.decompile(None, None) # use saved data.
        self.data = None # Once this table has been decompiled, make sure we don't
                         # just return the original data. Also avoids recursion when
                         # called with an attribute that the cmap subtable doesn't have.
        return getattr(self, attr)

    def decompileHeader(self, data, ttFont):
        # Parse the shared (format, length, language) header; the remainder
        # of the payload is kept on self.data for lazy decompilation.
        format, length, language = struct.unpack(">HHH", data[:6])
        assert len(data) == length, "corrupt cmap table format %d (data length: %d, header length: %d)" % (format, len(data), length)
        self.format = int(format)
        self.length = int(length)
        self.language = int(language)
        self.data = data[6:]
        self.ttFont = ttFont

    def toXML(self, writer, ttFont):
        writer.begintag(self.__class__.__name__, [
                ("platformID", self.platformID),
                ("platEncID", self.platEncID),
                ("language", self.language),
                ])
        writer.newline()
        codes = sorted(self.cmap.items())
        self._writeCodes(codes, writer)
        writer.endtag(self.__class__.__name__)
        writer.newline()

    def getEncoding(self, default=None):
        """Returns the Python encoding name for this cmap subtable based on its platformID,
        platEncID, and language.  If encoding for these values is not known, by default
        None is returned.  That can be overriden by passing a value to the default
        argument.

        Note that if you want to choose a "preferred" cmap subtable, most of the time
        self.isUnicode() is what you want as that one only returns true for the modern,
        commonly used, Unicode-compatible triplets, not the legacy ones.
        """
        return getEncoding(self.platformID, self.platEncID, self.language, default)

    def isUnicode(self):
        # Unicode platform, or Windows platform with a Unicode encoding id.
        return (self.platformID == 0 or
                (self.platformID == 3 and self.platEncID in [0, 1, 10]))

    def isSymbol(self):
        # Windows platform with the symbol encoding id.
        return self.platformID == 3 and self.platEncID == 0

    def _writeCodes(self, codes, writer):
        # Emit one <map/> element per code point; Unicode subtables also get
        # a character-name comment.
        isUnicode = self.isUnicode()
        for code, name in codes:
            writer.simpletag("map", code=hex(code), name=name)
            if isUnicode:
                writer.comment(Unicode[code])
            writer.newline()

    def __lt__(self, other):
        if not isinstance(other, CmapSubtable):
            return NotImplemented

        # implemented so that list.sort() sorts according to the spec.
        selfTuple = (
            getattr(self, "platformID", None),
            getattr(self, "platEncID", None),
            getattr(self, "language", None),
            self.__dict__)
        otherTuple = (
            getattr(other, "platformID", None),
            getattr(other, "platEncID", None),
            getattr(other, "language", None),
            other.__dict__)
        return selfTuple < otherTuple
class cmap_format_0(CmapSubtable):
    """Format 0: a flat 256-entry byte array mapping char codes 0-255 to
    single-byte glyph ids."""

    def decompile(self, data, ttFont):
        # Usually reached lazily via __getattr__, with both args None and the
        # payload already stashed on self by decompileHeader. Direct callers
        # must supply both arguments.
        if data is not None and ttFont is not None:
            self.decompileHeader(data, ttFont)
        else:
            assert (data is None and ttFont is None), "Need both data and ttFont arguments"
        data = self.data # decompileHeader assigns the data after the header to self.data
        assert 262 == self.length, "Format 0 cmap subtable not 262 bytes"
        glyphIdArray = array.array("B")
        glyphIdArray.frombytes(self.data)
        self.cmap = _make_map(self.ttFont, list(range(len(glyphIdArray))), glyphIdArray)

    def compile(self, ttFont):
        if self.data:
            # Never decompiled: the original payload is still valid as-is.
            return struct.pack(">HHH", 0, 262, self.language) + self.data
        cmap = self.cmap
        assert set(cmap.keys()).issubset(range(256))
        getGlyphID = ttFont.getGlyphID
        glyphIdArray = array.array("B")
        for code in range(256):
            glyphIdArray.append(getGlyphID(cmap[code]) if code in cmap else 0)
        data = struct.pack(">HHH", 0, 262, self.language) + glyphIdArray.tobytes()
        assert len(data) == 262
        return data

    def fromXML(self, name, attrs, content, ttFont):
        self.language = safeEval(attrs["language"])
        if not hasattr(self, "cmap"):
            self.cmap = {}
        mapping = self.cmap
        for element in content:
            if not isinstance(element, tuple):
                continue
            elementName, elementAttrs, _ = element
            if elementName == "map":
                mapping[safeEval(elementAttrs["code"])] = elementAttrs["name"]
# Format 2 SubHeader record layout: firstCode, entryCount (uint16),
# idDelta (int16), idRangeOffset (uint16), big-endian.
subHeaderFormat = ">HHhH"
class SubHeader(object):
    """One format 2 subheader: a per-lead-byte mapping record whose scalar
    fields start unset and whose glyphIndexArray collects glyph ids."""

    def __init__(self):
        # Scalar fields are filled in later by the caller.
        self.firstCode = self.entryCount = self.idDelta = self.idRangeOffset = None
        # Per-instance list of glyph ids for this subheader's subrange.
        self.glyphIndexArray = []
class cmap_format_2(CmapSubtable):
    """Format 2: high-byte mapping table for mixed 8/16-bit encodings
    (e.g. Shift-JIS style CJK encodings).

    A 256-entry key array selects a SubHeader per lead byte; each SubHeader
    maps the trailing byte (or the byte itself, for subheader 0) through a
    shared subrange of a glyph index array.
    """

    def setIDDelta(self, subHeader):
        """Pick subHeader.idDelta so the subrange's smallest nonzero stored
        gid becomes 1, rewriting glyphIndexArray to match."""
        subHeader.idDelta = 0
        # find the minGI which is not zero.
        minGI = subHeader.glyphIndexArray[0]
        for gid in subHeader.glyphIndexArray:
            if (gid != 0) and (gid < minGI):
                minGI = gid
        # The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1.
        # idDelta is a short, and must be between -32K and 32K. minGI can be between 1 and 64K.
        # We would like to pick an idDelta such that the first glyphArray GID is 1,
        # so that we are more likely to be able to combine glypharray GID subranges.
        # This means that we have a problem when minGI is > 32K
        # Since the final gi is reconstructed from the glyphArray GID by:
        #    (short)finalGID = (gid + idDelta) % 0x10000),
        # we can get from a glypharray GID of 1 to a final GID of 65K by subtracting 2, and casting the
        # negative number to an unsigned short.
        if (minGI > 1):
            if minGI > 0x7FFF:
                subHeader.idDelta = -(0x10000 - minGI) -1
            else:
                subHeader.idDelta = minGI -1
            idDelta = subHeader.idDelta
            for i in range(subHeader.entryCount):
                gid = subHeader.glyphIndexArray[i]
                if gid > 0:
                    subHeader.glyphIndexArray[i] = gid - idDelta

    def decompile(self, data, ttFont):
        """Expand the key array and subheaders into self.cmap."""
        # we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
        # If not, someone is calling the subtable decompile() directly, and must provide both args.
        if data is not None and ttFont is not None:
            self.decompileHeader(data, ttFont)
        else:
            assert (data is None and ttFont is None), "Need both data and ttFont arguments"

        data = self.data # decompileHeader assigns the data after the header to self.data
        subHeaderKeys = []
        maxSubHeaderindex = 0
        # get the key array, and determine the number of subHeaders.
        allKeys = array.array("H")
        allKeys.frombytes(data[:512])
        data = data[512:]
        if sys.byteorder != "big": allKeys.byteswap()
        subHeaderKeys = [ key//8 for key in allKeys]
        maxSubHeaderindex = max(subHeaderKeys)

        #Load subHeaders
        subHeaderList = []
        pos = 0
        for i in range(maxSubHeaderindex + 1):
            subHeader = SubHeader()
            (subHeader.firstCode, subHeader.entryCount, subHeader.idDelta, \
                subHeader.idRangeOffset) = struct.unpack(subHeaderFormat, data[pos:pos + 8])
            pos += 8
            giDataPos = pos + subHeader.idRangeOffset-2
            giList = array.array("H")
            giList.frombytes(data[giDataPos:giDataPos + subHeader.entryCount*2])
            if sys.byteorder != "big": giList.byteswap()
            subHeader.glyphIndexArray = giList
            subHeaderList.append(subHeader)
        # How this gets processed.
        # Charcodes may be one or two bytes.
        # The first byte of a charcode is mapped through the subHeaderKeys, to select
        # a subHeader. For any subheader but 0, the next byte is then mapped through the
        # selected subheader. If subheader Index 0 is selected, then the byte itself is
        # mapped through the subheader, and there is no second byte.
        # Then assume that the subsequent byte is the first byte of the next charcode,and repeat.
        #
        # Each subheader references a range in the glyphIndexArray whose length is entryCount.
        # The range in glyphIndexArray referenced by a sunheader may overlap with the range in glyphIndexArray
        # referenced by another subheader.
        # The only subheader that will be referenced by more than one first-byte value is the subheader
        # that maps the entire range of glyphID values to glyphIndex 0, e.g notdef:
        #    {firstChar 0, EntryCount 0,idDelta 0,idRangeOffset xx}
        # A byte being mapped though a subheader is treated as in index into a mapping of array index to font glyphIndex.
        # A subheader specifies a subrange within (0...256) by the
        # firstChar and EntryCount values. If the byte value is outside the subrange, then the glyphIndex is zero
        # (e.g. glyph not in font).
        # If the byte index is in the subrange, then an offset index is calculated as (byteIndex - firstChar).
        # The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by
        # counting idRangeOffset bytes from the idRangeOffset word. The first value in this subrange is the
        # glyphIndex for the index firstChar. The offset index should then be used in this array to get the glyphIndex.
        # Example for Logocut-Medium
        # first byte of charcode = 129; selects subheader 1.
        # subheader 1 =  {firstChar 64, EntryCount 108,idDelta 42,idRangeOffset 0252}
        # second byte of charCode = 66
        # the index offset = 66-64 = 2.
        # The subrange of the glyphIndexArray starting at 0x0252 bytes from the idRangeOffset word is:
        # [glyphIndexArray index], [subrange array index] = glyphIndex
        # [256], [0]=1 	from charcode [129, 64]
        # [257], [1]=2  from charcode [129, 65]
        # [258], [2]=3  from charcode [129, 66]
        # [259], [3]=4  from charcode [129, 67]
        # So, the glyphIndex = 3 from the array. Then if idDelta is not zero and the glyph ID is not zero,
        # add it to the glyphID to get the final glyphIndex
        # value. In this case the final glyph index = 3+ 42 -> 45 for the final glyphIndex. Whew!

        self.data = b""
        cmap = {}
        notdefGI = 0
        for firstByte in range(256):
            subHeadindex = subHeaderKeys[firstByte]
            subHeader = subHeaderList[subHeadindex]
            if subHeadindex == 0:
                if (firstByte < subHeader.firstCode) or (firstByte >= subHeader.firstCode + subHeader.entryCount):
                    continue # gi is notdef.
                else:
                    charCode = firstByte
                    offsetIndex = firstByte - subHeader.firstCode
                    gi = subHeader.glyphIndexArray[offsetIndex]
                    if gi != 0:
                        gi = (gi + subHeader.idDelta) % 0x10000
                    else:
                        continue # gi is notdef.
                cmap[charCode] = gi
            else:
                if subHeader.entryCount:
                    charCodeOffset = firstByte * 256 + subHeader.firstCode
                    for offsetIndex in range(subHeader.entryCount):
                        charCode = charCodeOffset + offsetIndex
                        gi = subHeader.glyphIndexArray[offsetIndex]
                        if gi != 0:
                            gi = (gi + subHeader.idDelta) % 0x10000
                        else:
                            continue
                        cmap[charCode] = gi
                # If not subHeader.entryCount, then all char codes with this first byte are
                # mapped to .notdef. We can skip this subtable, and leave the glyphs un-encoded, which is the
                # same as mapping it to .notdef.

        gids = list(cmap.values())
        charCodes = list(cmap.keys())
        self.cmap = _make_map(self.ttFont, charCodes, gids)

    def compile(self, ttFont):
        """Pack self.cmap back into the key array / subheader / glyph index
        array layout, sharing glyph index subranges where possible."""
        if self.data:
            return struct.pack(">HHH", self.format, self.length, self.language) + self.data
        kEmptyTwoCharCodeRange = -1
        notdefGI = 0

        items = sorted(self.cmap.items())
        charCodes = [item[0] for item in items]
        names = [item[1] for item in items]
        nameMap = ttFont.getReverseGlyphMap()
        try:
            gids = [nameMap[name] for name in names]
        except KeyError:
            nameMap = ttFont.getReverseGlyphMap(rebuild=True)
            try:
                gids = [nameMap[name] for name in names]
            except KeyError:
                # allow virtual GIDs in format 2 tables
                gids = []
                for name in names:
                    try:
                        gid = nameMap[name]
                    except KeyError:
                        try:
                            if (name[:3] == 'gid'):
                                gid = int(name[3:])
                            else:
                                gid = ttFont.getGlyphID(name)
                        except:
                            raise KeyError(name)

                    gids.append(gid)

        # Process the (char code to gid) item list in char code order.
        # By definition, all one byte char codes map to subheader 0.
        # For all the two byte char codes, we assume that the first byte maps maps to the empty subhead (with an entry count of 0,
        # which defines all char codes in its range to map to notdef) unless proven otherwise.
        # Note that since the char code items are processed in char code order, all the char codes with the
        # same first byte are in sequential order.

        subHeaderKeys = [kEmptyTwoCharCodeRange for x in range(256)] # list of indices into subHeaderList.
        subHeaderList = []

        # We force this subheader entry 0 to exist in the subHeaderList in the case where some one comes up
        # with a cmap where all the one byte char codes map to notdef,
        # with the result that the subhead 0 would not get created just by processing the item list.
        charCode = charCodes[0]
        if charCode > 255:
            subHeader = SubHeader()
            subHeader.firstCode = 0
            subHeader.entryCount = 0
            subHeader.idDelta = 0
            subHeader.idRangeOffset = 0
            subHeaderList.append(subHeader)

        lastFirstByte = -1
        items = zip(charCodes, gids)
        for charCode, gid in items:
            if gid == 0:
                continue
            firstbyte = charCode >> 8
            secondByte = charCode & 0x00FF

            if firstbyte != lastFirstByte: # Need to update the current subhead, and start a new one.
                if lastFirstByte > -1:
                    # fix GI's and iDelta of current subheader.
                    self.setIDDelta(subHeader)

                    # If it was sunheader 0 for one-byte charCodes, then we need to set the subHeaderKeys value to zero
                    # for the indices matching the char codes.
                    if lastFirstByte == 0:
                        for index in range(subHeader.entryCount):
                            charCode = subHeader.firstCode + index
                            subHeaderKeys[charCode] = 0

                    assert (subHeader.entryCount == len(subHeader.glyphIndexArray)), "Error - subhead entry count does not match len of glyphID subrange."
                # init new subheader
                subHeader = SubHeader()
                subHeader.firstCode = secondByte
                subHeader.entryCount = 1
                subHeader.glyphIndexArray.append(gid)
                subHeaderList.append(subHeader)
                subHeaderKeys[firstbyte] = len(subHeaderList) -1
                lastFirstByte = firstbyte
            else:
                # need to fill in with notdefs all the code points between the last charCode and the current charCode.
                codeDiff = secondByte - (subHeader.firstCode + subHeader.entryCount)
                for i in range(codeDiff):
                    subHeader.glyphIndexArray.append(notdefGI)
                subHeader.glyphIndexArray.append(gid)
                subHeader.entryCount = subHeader.entryCount + codeDiff + 1

        # fix GI's and iDelta of last subheader that we we added to the subheader array.
        self.setIDDelta(subHeader)

        # Now we add a final subheader for the subHeaderKeys which maps to empty two byte charcode ranges.
        subHeader = SubHeader()
        subHeader.firstCode = 0
        subHeader.entryCount = 0
        subHeader.idDelta = 0
        subHeader.idRangeOffset = 2
        subHeaderList.append(subHeader)
        emptySubheadIndex = len(subHeaderList) - 1
        for index in range(256):
            if subHeaderKeys[index] == kEmptyTwoCharCodeRange:
                subHeaderKeys[index] = emptySubheadIndex
        # Since this is the last subheader, the GlyphIndex Array starts two bytes after the start of the
        # idRangeOffset word of this subHeader. We can safely point to the first entry in the GlyphIndexArray,
        # since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with
        # charcode 0 and GID 0.

        idRangeOffset = (len(subHeaderList)-1)*8 + 2 # offset to beginning of glyphIDArray from first subheader idRangeOffset.
        subheadRangeLen = len(subHeaderList) -1 # skip last special empty-set subheader; we've already hardocodes its idRangeOffset to 2.
        for index in range(subheadRangeLen):
            subHeader = subHeaderList[index]
            subHeader.idRangeOffset = 0
            for j in range(index):
                prevSubhead = subHeaderList[j]
                if prevSubhead.glyphIndexArray == subHeader.glyphIndexArray: # use the glyphIndexArray subarray
                    subHeader.idRangeOffset = prevSubhead.idRangeOffset - (index-j)*8
                    subHeader.glyphIndexArray = []
                    break
            if subHeader.idRangeOffset == 0: # didn't find one.
                subHeader.idRangeOffset = idRangeOffset
                idRangeOffset = (idRangeOffset - 8) + subHeader.entryCount*2 # one less subheader, one more subArray.
            else:
                idRangeOffset = idRangeOffset - 8 # one less subheader

        # Now we can write out the data!
        length = 6 + 512 + 8*len(subHeaderList) # header, 256 subHeaderKeys, and subheader array.
        for subhead in subHeaderList[:-1]:
            length = length + len(subhead.glyphIndexArray)*2 # We can't use subhead.entryCount, as some of the subhead may share subArrays.
        dataList = [struct.pack(">HHH", 2, length, self.language)]
        for index in subHeaderKeys:
            dataList.append(struct.pack(">H", index*8))
        for subhead in subHeaderList:
            dataList.append(struct.pack(subHeaderFormat, subhead.firstCode, subhead.entryCount, subhead.idDelta, subhead.idRangeOffset))
        for subhead in subHeaderList[:-1]:
            for gi in subhead.glyphIndexArray:
                dataList.append(struct.pack(">H", gi))
        data = bytesjoin(dataList)
        assert (len(data) == length), "Error: cmap format 2 is not same length as calculated! actual: " + str(len(data))+ " calc : " + str(length)
        return data

    def fromXML(self, name, attrs, content, ttFont):
        self.language = safeEval(attrs["language"])
        if not hasattr(self, "cmap"):
            self.cmap = {}
        cmap = self.cmap

        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name != "map":
                continue
            cmap[safeEval(attrs["code"])] = attrs["name"]
# Fixed header of a format 4 subtable: format, length, language,
# segCountX2, searchRange, entrySelector, rangeShift (all uint16).
cmap_format_4_format = ">7H"

#uint16  endCode[segCount]          # Ending character code for each segment, last = 0xFFFF.
#uint16  reservedPad                # This value should be zero
#uint16  startCode[segCount]        # Starting character code for each segment
#uint16  idDelta[segCount]          # Delta for all character codes in segment
#uint16  idRangeOffset[segCount]    # Offset in bytes to glyph indexArray, or 0
#uint16  glyphIndexArray[variable]  # Glyph index array
def splitRange(startCode, endCode, cmap):
    """Split the char-code range [startCode, endCode] into cmap4 segments.

    Returns (start, end): the extra segment boundaries such that
    len(start) + 1 == len(end), covering the whole input range.  Ranges
    whose glyph IDs run consecutively can use the compact idDelta encoding,
    so the heuristic isolates long consecutive runs into their own segments
    when the saved glyphIndexArray entries outweigh the 8-byte segment cost.
    Not provably optimal, but never worse than a single segment.
    """
    if startCode == endCode:
        return [], [endCode]

    # Phase 1: collect maximal subranges whose glyph IDs are consecutive.
    consecutive = []
    runBegin = None
    inRun = False
    prevGID = cmap[startCode]
    prevCode = startCode
    for code in range(startCode + 1, endCode + 1):
        gid = cmap[code]
        if gid == prevGID + 1:
            if not inRun:
                inRun = True
                runBegin = prevCode
        elif inRun:
            inRun = False
            consecutive.append((runBegin, prevCode))
            runBegin = None
        prevGID = gid
        prevCode = code
    if inRun:
        consecutive.append((runBegin, prevCode))
    assert prevCode == endCode

    # Phase 2: drop runs too short to pay for themselves.  A new segment
    # costs 8 bytes; staying in a glyphIndexArray costs 2 bytes per char.
    worthwhile = []
    for b, e in consecutive:
        if b == startCode and e == endCode:
            break  # one run covers everything -- a single segment is best
        # A run touching either boundary adds only one segment; an interior
        # run adds two (itself plus the remainder on the far side).
        threshold = 4 if (b == startCode or e == endCode) else 8
        if e - b + 1 > threshold:
            worthwhile.append((b, e))
    if not worthwhile:
        return [], [endCode]

    # Phase 3: pad out to the full range and fill the gaps between runs
    # (the gaps are the segments with non-consecutive glyph IDs).
    if worthwhile[0][0] != startCode:
        worthwhile.insert(0, (startCode, worthwhile[0][0] - 1))
    if worthwhile[-1][1] != endCode:
        worthwhile.append((worthwhile[-1][1] + 1, endCode))
    i = 1
    while i < len(worthwhile):
        prevEnd = worthwhile[i - 1][1]
        if prevEnd + 1 != worthwhile[i][0]:
            worthwhile.insert(i, (prevEnd + 1, worthwhile[i][0] - 1))
            i += 1
        i += 1

    # Phase 4: flatten into parallel start/end lists; the caller supplies
    # the overall startCode itself, so the first start is dropped.
    starts = [b for b, _ in worthwhile]
    ends = [e for _, e in worthwhile]
    starts.pop(0)
    assert len(starts) + 1 == len(ends)
    return starts, ends
class cmap_format_4(CmapSubtable):
# Segment-mapping subtable: the standard format for 16-bit (BMP) Unicode
# mappings.  Character codes are grouped into sorted segments described by
# four parallel arrays (endCode/startCode/idDelta/idRangeOffset), with an
# overflow glyphIndexArray for segments whose glyph IDs are not consecutive.
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
(segCountX2, searchRange, entrySelector, rangeShift) = \
struct.unpack(">4H", data[:8])
data = data[8:]
segCount = segCountX2 // 2
# The rest of the subtable is one big run of 16-bit values; read it in one
# go and slice it into the parallel arrays below.
allCodes = array.array("H")
allCodes.frombytes(data)
self.data = data = None
# Table data is big endian; swap to native order on little-endian hosts.
if sys.byteorder != "big": allCodes.byteswap()
# divide the data
endCode = allCodes[:segCount]
allCodes = allCodes[segCount+1:] # the +1 is skipping the reservedPad field
startCode = allCodes[:segCount]
allCodes = allCodes[segCount:]
idDelta = allCodes[:segCount]
allCodes = allCodes[segCount:]
idRangeOffset = allCodes[:segCount]
# Whatever remains after the four arrays is the glyph index overflow array.
glyphIndexArray = allCodes[segCount:]
lenGIArray = len(glyphIndexArray)
# build 2-byte character mapping
charCodes = []
gids = []
for i in range(len(startCode) - 1): # don't do 0xffff!
start = startCode[i]
delta = idDelta[i]
rangeOffset = idRangeOffset[i]
# *someone* needs to get killed.
# 'partial' folds the spec's self-relative idRangeOffset (a byte offset
# from the idRangeOffset entry's own position) into a direct index, so
# that below: glyphIndexArray[charCode + partial] is the mapped glyph.
partial = rangeOffset // 2 - start + i - len(idRangeOffset)
rangeCharCodes = list(range(startCode[i], endCode[i] + 1))
charCodes.extend(rangeCharCodes)
if rangeOffset == 0:
# Consecutive-glyph segment: glyph = (code + delta) mod 65536.
gids.extend([(charCode + delta) & 0xFFFF for charCode in rangeCharCodes])
else:
for charCode in rangeCharCodes:
index = charCode + partial
assert (index < lenGIArray), "In format 4 cmap, range (%d), the calculated index (%d) into the glyph index array is not less than the length of the array (%d) !" % (i, index, lenGIArray)
if glyphIndexArray[index] != 0: # if not missing glyph
glyphID = glyphIndexArray[index] + delta
else:
glyphID = 0 # missing glyph
gids.append(glyphID & 0xFFFF)
self.cmap = _make_map(self.ttFont, charCodes, gids)
def compile(self, ttFont):
# If the subtable was never decompiled, pass the raw bytes through.
if self.data:
return struct.pack(">HHH", self.format, self.length, self.language) + self.data
charCodes = list(self.cmap.keys())
if not charCodes:
# Empty mapping: emit only the mandatory closing 0xffff segment.
startCode = [0xffff]
endCode = [0xffff]
else:
charCodes.sort()
names = [self.cmap[code] for code in charCodes]
nameMap = ttFont.getReverseGlyphMap()
try:
gids = [nameMap[name] for name in names]
except KeyError:
# Some glyph name was unknown; rebuild the reverse map once and retry.
nameMap = ttFont.getReverseGlyphMap(rebuild=True)
try:
gids = [nameMap[name] for name in names]
except KeyError:
# allow virtual GIDs in format 4 tables
gids = []
for name in names:
try:
gid = nameMap[name]
except KeyError:
try:
if (name[:3] == 'gid'):
gid = int(name[3:])
else:
gid = ttFont.getGlyphID(name)
# NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
# 'except Exception' would be safer -- confirm before changing.
except:
raise KeyError(name)
gids.append(gid)
cmap = {} # code:glyphID mapping
for code, gid in zip(charCodes, gids):
cmap[code] = gid
# Build startCode and endCode lists.
# Split the char codes in ranges of consecutive char codes, then split
# each range in more ranges of consecutive/not consecutive glyph IDs.
# See splitRange().
lastCode = charCodes[0]
endCode = []
startCode = [lastCode]
for charCode in charCodes[1:]: # skip the first code, it's the first start code
if charCode == lastCode + 1:
lastCode = charCode
continue
start, end = splitRange(startCode[-1], lastCode, cmap)
startCode.extend(start)
endCode.extend(end)
startCode.append(charCode)
lastCode = charCode
# Close off the final run the loop left open.
start, end = splitRange(startCode[-1], lastCode, cmap)
startCode.extend(start)
endCode.extend(end)
startCode.append(0xffff)
endCode.append(0xffff)
# build up rest of cruft
idDelta = []
idRangeOffset = []
glyphIndexArray = []
for i in range(len(endCode)-1): # skip the closing codes (0xffff)
indices = []
for charCode in range(startCode[i], endCode[i] + 1):
indices.append(cmap[charCode])
if (indices == list(range(indices[0], indices[0] + len(indices)))):
# Consecutive glyph IDs: encode the whole segment as a delta.
idDelta.append((indices[0] - startCode[i]) % 0x10000)
idRangeOffset.append(0)
else:
# someone *definitely* needs to get killed.
# Non-consecutive: spill glyphs into glyphIndexArray and store the
# self-relative byte offset the spec requires (see decompile above).
idDelta.append(0)
idRangeOffset.append(2 * (len(endCode) + len(glyphIndexArray) - i))
glyphIndexArray.extend(indices)
idDelta.append(1) # 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef
idRangeOffset.append(0)
# Insane.
segCount = len(endCode)
segCountX2 = segCount * 2
searchRange, entrySelector, rangeShift = getSearchRange(segCount, 2)
# Serialize all arrays big endian: endCode, reservedPad (the [0]),
# startCode, idDelta, idRangeOffset, glyphIndexArray.
charCodeArray = array.array("H", endCode + [0] + startCode)
idDeltaArray = array.array("H", idDelta)
restArray = array.array("H", idRangeOffset + glyphIndexArray)
if sys.byteorder != "big": charCodeArray.byteswap()
if sys.byteorder != "big": idDeltaArray.byteswap()
if sys.byteorder != "big": restArray.byteswap()
data = charCodeArray.tobytes() + idDeltaArray.tobytes() + restArray.tobytes()
length = struct.calcsize(cmap_format_4_format) + len(data)
header = struct.pack(cmap_format_4_format, self.format, length, self.language,
segCountX2, searchRange, entrySelector, rangeShift)
return header + data
def fromXML(self, name, attrs, content, ttFont):
# Rebuild language and the code->glyphName dict from TTX <map> elements.
self.language = safeEval(attrs["language"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
nameMap, attrsMap, dummyContent = element
if nameMap != "map":
assert 0, "Unrecognized keyword in cmap subtable"
cmap[safeEval(attrsMap["code"])] = attrsMap["name"]
class cmap_format_6(CmapSubtable):
# Trimmed table mapping: a dense run of glyph IDs for the contiguous
# character range [firstCode, firstCode + entryCount).
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
firstCode, entryCount = struct.unpack(">HH", data[:4])
firstCode = int(firstCode)
data = data[4:]
#assert len(data) == 2 * entryCount # XXX not true in Apple's Helvetica!!!
gids = array.array("H")
# Read at most entryCount glyph IDs; trailing bytes (if any) are ignored.
gids.frombytes(data[:2 * int(entryCount)])
# Big-endian table data; swap on little-endian hosts.
if sys.byteorder != "big": gids.byteswap()
self.data = data = None
charCodes = list(range(firstCode, firstCode + len(gids)))
self.cmap = _make_map(self.ttFont, charCodes, gids)
def compile(self, ttFont):
# Raw passthrough if the subtable was never decompiled.
if self.data:
return struct.pack(">HHH", self.format, self.length, self.language) + self.data
cmap = self.cmap
codes = sorted(cmap.keys())
if codes: # yes, there are empty cmap tables.
# Format 6 is dense: cover the full span and map holes to glyph 0.
codes = list(range(codes[0], codes[-1] + 1))
firstCode = codes[0]
valueList = [
ttFont.getGlyphID(cmap[code]) if code in cmap else 0
for code in codes
]
gids = array.array("H", valueList)
if sys.byteorder != "big": gids.byteswap()
data = gids.tobytes()
else:
data = b""
firstCode = 0
# Header: format, length (10 header bytes + data), language, firstCode, entryCount.
header = struct.pack(">HHHHH",
6, len(data) + 10, self.language, firstCode, len(codes))
return header + data
def fromXML(self, name, attrs, content, ttFont):
# Rebuild language and the code->glyphName dict from TTX <map> elements.
self.language = safeEval(attrs["language"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
cmap[safeEval(attrs["code"])] = attrs["name"]
class cmap_format_12_or_13(CmapSubtable):
# Shared implementation for the 32-bit segmented formats.  Both store a list
# of (startCharCode, endCharCode, glyphID) groups; subclasses define
# _computeGIDs/_IsInSameRun to give format 12 (consecutive glyph IDs within
# a group) or format 13 (one glyph for the whole group) semantics.
def __init__(self, format):
self.format = format
self.reserved = 0
self.data = None
self.ttFont = None
def decompileHeader(self, data, ttFont):
# Header: format, reserved, length, language, nGroups (16 bytes total).
format, reserved, length, language, nGroups = struct.unpack(">HHLLL", data[:16])
assert len(data) == (16 + nGroups*12) == (length), "corrupt cmap table format %d (data length: %d, header length: %d)" % (self.format, len(data), length)
self.format = format
self.reserved = reserved
self.length = length
self.language = language
self.nGroups = nGroups
self.data = data[16:]
self.ttFont = ttFont
def decompile(self, data, ttFont):
# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
# If not, someone is calling the subtable decompile() directly, and must provide both args.
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data # decompileHeader assigns the data after the header to self.data
charCodes = []
gids = []
pos = 0
# Expand each 12-byte group into explicit (charCode, gid) pairs.
for i in range(self.nGroups):
startCharCode, endCharCode, glyphID = struct.unpack(">LLL",data[pos:pos+12] )
pos += 12
lenGroup = 1 + endCharCode - startCharCode
charCodes.extend(list(range(startCharCode, endCharCode +1)))
gids.extend(self._computeGIDs(glyphID, lenGroup))
self.data = data = None
self.cmap = _make_map(self.ttFont, charCodes, gids)
def compile(self, ttFont):
# Raw passthrough if the subtable was never decompiled.
if self.data:
return struct.pack(">HHLLL", self.format, self.reserved, self.length, self.language, self.nGroups) + self.data
charCodes = list(self.cmap.keys())
names = list(self.cmap.values())
nameMap = ttFont.getReverseGlyphMap()
try:
gids = [nameMap[name] for name in names]
except KeyError:
# Unknown name: rebuild the reverse map once and retry.
nameMap = ttFont.getReverseGlyphMap(rebuild=True)
try:
gids = [nameMap[name] for name in names]
except KeyError:
# allow virtual GIDs in format 12 tables
gids = []
for name in names:
try:
gid = nameMap[name]
except KeyError:
try:
if (name[:3] == 'gid'):
gid = int(name[3:])
else:
gid = ttFont.getGlyphID(name)
# NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
# 'except Exception' would be safer -- confirm before changing.
except:
raise KeyError(name)
gids.append(gid)
cmap = {} # code:glyphID mapping
for code, gid in zip(charCodes, gids):
cmap[code] = gid
charCodes.sort()
index = 0
startCharCode = charCodes[0]
startGlyphID = cmap[startCharCode]
# Seed last* so that the first iteration always continues the "run"
# (the subclass's _IsInSameRun sees a perfectly preceding pair).
lastGlyphID = startGlyphID - self._format_step
lastCharCode = startCharCode - 1
nGroups = 0
dataList = []
maxIndex = len(charCodes)
for index in range(maxIndex):
charCode = charCodes[index]
glyphID = cmap[charCode]
if not self._IsInSameRun(glyphID, lastGlyphID, charCode, lastCharCode):
# Run broken: flush the previous group and start a new one.
dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
startCharCode = charCode
startGlyphID = glyphID
nGroups = nGroups + 1
lastGlyphID = glyphID
lastCharCode = charCode
# Flush the final open group.
dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
nGroups = nGroups + 1
data = bytesjoin(dataList)
lengthSubtable = len(data) +16
assert len(data) == (nGroups*12) == (lengthSubtable-16)
return struct.pack(">HHLLL", self.format, self.reserved, lengthSubtable, self.language, nGroups) + data
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, [
("platformID", self.platformID),
("platEncID", self.platEncID),
("format", self.format),
("reserved", self.reserved),
("length", self.length),
("language", self.language),
("nGroups", self.nGroups),
])
writer.newline()
codes = sorted(self.cmap.items())
self._writeCodes(codes, writer)
writer.endtag(self.__class__.__name__)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
# Restore header fields and the code->glyphName dict from TTX elements.
self.format = safeEval(attrs["format"])
self.reserved = safeEval(attrs["reserved"])
self.length = safeEval(attrs["length"])
self.language = safeEval(attrs["language"])
self.nGroups = safeEval(attrs["nGroups"])
if not hasattr(self, "cmap"):
self.cmap = {}
cmap = self.cmap
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
cmap[safeEval(attrs["code"])] = attrs["name"]
class cmap_format_12(cmap_format_12_or_13):
    """Format 12 (segmented coverage): glyph IDs run consecutively in a group."""

    _format_step = 1

    def __init__(self, format=12):
        cmap_format_12_or_13.__init__(self, format)

    def _computeGIDs(self, startingGlyph, numberOfGlyphs):
        # Glyph IDs increase by one per character inside a group.
        return [startingGlyph + offset for offset in range(numberOfGlyphs)]

    def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
        # Both char code and glyph ID must advance by exactly one.
        return glyphID - lastGlyphID == 1 and charCode - lastCharCode == 1
class cmap_format_13(cmap_format_12_or_13):
    """Format 13 (many-to-one): every character in a group maps to one glyph."""

    _format_step = 0

    def __init__(self, format=13):
        cmap_format_12_or_13.__init__(self, format)

    def _computeGIDs(self, startingGlyph, numberOfGlyphs):
        # The whole group shares a single glyph ID.
        return [startingGlyph for _ in range(numberOfGlyphs)]

    def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
        # Char code advances by one while the glyph ID stays constant.
        return glyphID == lastGlyphID and charCode - lastCharCode == 1
def cvtToUVS(threeByteString):
    """Decode a 3-byte big-endian string into its unsigned integer value."""
    # Left-pad to 4 bytes so a single ">L" unpack does the work.
    padded = b"\x00" + threeByteString
    (value,) = struct.unpack(">L", padded)
    return value
def cvtFromUVS(val):
    """Encode an unsigned integer below 2**24 as a 3-byte big-endian string."""
    assert 0 <= val < 0x1000000
    # Pack as 4 bytes and drop the (always zero) leading byte.
    packed = struct.pack(">L", val)
    return packed[1:]
class cmap_format_14(CmapSubtable):
# Unicode Variation Sequences subtable.  Maps each variation selector to a
# list of (baseCharCode, glyphName) pairs in self.uvsDict; a glyphName of
# None marks a *default* UVS (the base character's normal glyph is used).
def decompileHeader(self, data, ttFont):
# Header: format, length, numVarSelectorRecords (10 bytes).
format, length, numVarSelectorRecords = struct.unpack(">HLL", data[:10])
self.data = data[10:]
self.length = length
self.numVarSelectorRecords = numVarSelectorRecords
self.ttFont = ttFont
self.language = 0xFF # has no language.
def decompile(self, data, ttFont):
# Called with both args directly, or with (None, None) lazily via
# the subtable __getattr__ after decompileHeader already ran.
if data is not None and ttFont is not None:
self.decompileHeader(data, ttFont)
else:
assert (data is None and ttFont is None), "Need both data and ttFont arguments"
data = self.data
self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail.
uvsDict = {}
recOffset = 0
for n in range(self.numVarSelectorRecords):
# Each record: 24-bit variation selector + two offsets (11 bytes).
uvs, defOVSOffset, nonDefUVSOffset = struct.unpack(">3sLL", data[recOffset:recOffset +11])
recOffset += 11
varUVS = cvtToUVS(uvs)
if defOVSOffset:
# Offsets are relative to the subtable start; self.data begins
# after the 10-byte header, hence the -10 adjustment.
startOffset = defOVSOffset - 10
numValues, = struct.unpack(">L", data[startOffset:startOffset+4])
startOffset +=4
for r in range(numValues):
# Default UVS range: 24-bit start char + additional count byte.
uv, addtlCnt = struct.unpack(">3sB", data[startOffset:startOffset+4])
startOffset += 4
firstBaseUV = cvtToUVS(uv)
cnt = addtlCnt+1
baseUVList = list(range(firstBaseUV, firstBaseUV+cnt))
# None glyph name == "use the default glyph for this base char".
glyphList = [None]*cnt
localUVList = zip(baseUVList, glyphList)
try:
uvsDict[varUVS].extend(localUVList)
except KeyError:
uvsDict[varUVS] = list(localUVList)
if nonDefUVSOffset:
startOffset = nonDefUVSOffset - 10
numRecs, = struct.unpack(">L", data[startOffset:startOffset+4])
startOffset +=4
localUVList = []
for r in range(numRecs):
# Non-default mapping: 24-bit base char + explicit 16-bit glyph ID.
uv, gid = struct.unpack(">3sH", data[startOffset:startOffset+5])
startOffset += 5
uv = cvtToUVS(uv)
glyphName = self.ttFont.getGlyphName(gid)
localUVList.append((uv, glyphName))
try:
uvsDict[varUVS].extend(localUVList)
except KeyError:
uvsDict[varUVS] = localUVList
self.uvsDict = uvsDict
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, [
("platformID", self.platformID),
("platEncID", self.platEncID),
])
writer.newline()
uvsDict = self.uvsDict
uvsList = sorted(uvsDict.keys())
for uvs in uvsList:
uvList = uvsDict[uvs]
# Default mappings (name None) sort before named ones, then by code point.
uvList.sort(key=lambda item: (item[1] is not None, item[0], item[1]))
for uv, gname in uvList:
attrs = [("uv", hex(uv)), ("uvs", hex(uvs))]
if gname is not None:
attrs.append(("name", gname))
writer.simpletag("map", attrs)
writer.newline()
writer.endtag(self.__class__.__name__)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
self.language = 0xFF # provide a value so that CmapSubtable.__lt__() won't fail
if not hasattr(self, "cmap"):
self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail.
if not hasattr(self, "uvsDict"):
self.uvsDict = {}
uvsDict = self.uvsDict
# For backwards compatibility reasons we accept "None" as an indicator
# for "default mapping", unless the font actually has a glyph named
# "None".
_hasGlyphNamedNone = None
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name != "map":
continue
uvs = safeEval(attrs["uvs"])
uv = safeEval(attrs["uv"])
gname = attrs.get("name")
if gname == "None":
if _hasGlyphNamedNone is None:
# Lazily computed once; glyph-order lookup is not free.
_hasGlyphNamedNone = "None" in ttFont.getGlyphOrder()
if not _hasGlyphNamedNone:
gname = None
try:
uvsDict[uvs].append((uv, gname))
except KeyError:
uvsDict[uvs] = [(uv, gname)]
def compile(self, ttFont):
# Raw passthrough if the subtable was never decompiled.
if self.data:
return struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords) + self.data
uvsDict = self.uvsDict
uvsList = sorted(uvsDict.keys())
self.numVarSelectorRecords = len(uvsList)
# 'offset' tracks where the next default/non-default table will land,
# counted from the subtable start (header + all VarSelectorRecords).
offset = 10 + self.numVarSelectorRecords*11 # current value is end of VarSelectorRecords block.
data = []
varSelectorRecords =[]
for uvs in uvsList:
entryList = uvsDict[uvs]
# Entries with glyph name None are default UVS ranges.
defList = [entry for entry in entryList if entry[1] is None]
if defList:
defList = [entry[0] for entry in defList]
defOVSOffset = offset
defList.sort()
# Run-length encode consecutive base code points into
# (startUnicodeValue, additionalCount) records.
lastUV = defList[0]
cnt = -1
defRecs = []
for defEntry in defList:
cnt +=1
if (lastUV+cnt) != defEntry:
rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt-1)
lastUV = defEntry
defRecs.append(rec)
cnt = 0
# Flush the final open run.
rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt)
defRecs.append(rec)
numDefRecs = len(defRecs)
data.append(struct.pack(">L", numDefRecs))
data.extend(defRecs)
offset += 4 + numDefRecs*4
else:
defOVSOffset = 0
ndefList = [entry for entry in entryList if entry[1] is not None]
if ndefList:
nonDefUVSOffset = offset
ndefList.sort()
numNonDefRecs = len(ndefList)
data.append(struct.pack(">L", numNonDefRecs))
offset += 4 + numNonDefRecs*5
for uv, gname in ndefList:
gid = ttFont.getGlyphID(gname)
ndrec = struct.pack(">3sH", cvtFromUVS(uv), gid)
data.append(ndrec)
else:
nonDefUVSOffset = 0
vrec = struct.pack(">3sLL", cvtFromUVS(uvs), defOVSOffset, nonDefUVSOffset)
varSelectorRecords.append(vrec)
data = bytesjoin(varSelectorRecords) + bytesjoin(data)
self.length = 10 + len(data)
headerdata = struct.pack(">HLL", self.format, self.length, self.numVarSelectorRecords)
return headerdata + data
class cmap_format_unknown(CmapSubtable):
    """Fallback subtable for cmap formats with no dedicated class.

    The raw bytes are kept untouched and round-tripped as a hex dump.
    """

    def toXML(self, writer, ttFont):
        # "cmap_format_" (first 12 chars of the class name) + actual format number.
        tagName = self.__class__.__name__[:12] + str(self.format)
        writer.begintag(tagName, [
            ("platformID", self.platformID),
            ("platEncID", self.platEncID),
        ])
        writer.newline()
        writer.dumphex(self.data)
        writer.endtag(tagName)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.data = readHex(content)
        # Empty mapping so clients expecting .cmap don't fail.
        self.cmap = {}

    def decompileHeader(self, data, ttFont):
        self.language = 0  # dummy value
        self.data = data

    def decompile(self, data, ttFont):
        # Called with both args directly, or with (None, None) lazily via
        # __getattr__ after decompileHeader already stored the data.
        if data is not None and ttFont is not None:
            self.decompileHeader(data, ttFont)
        else:
            assert (data is None and ttFont is None), "Need both data and ttFont arguments"

    def compile(self, ttFont):
        return self.data if self.data else None
# Dispatch table: cmap subtable format number -> implementing class.
# Formats not listed here are handled by cmap_format_unknown.
cmap_classes = {
0: cmap_format_0,
2: cmap_format_2,
4: cmap_format_4,
6: cmap_format_6,
12: cmap_format_12,
13: cmap_format_13,
14: cmap_format_14,
}
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_c_m_a_p.py",
"copies": "5",
"size": "45373",
"license": "apache-2.0",
"hash": -3849395560030746000,
"line_mean": 33.8219493477,
"line_max": 191,
"alpha_frac": 0.6909836246,
"autogenerated": false,
"ratio": 3.140217316077237,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6331200940677237,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import bytesjoin
from . import DefaultTable
from fontTools.misc import sstruct
from fontTools.ttLib.tables.TupleVariation import \
compileTupleVariationStore, decompileTupleVariationStore, TupleVariation
# https://www.microsoft.com/typography/otspec/cvar.htm
# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6cvar.html
# sstruct format for the fixed 'cvar' table header.
CVAR_HEADER_FORMAT = """
> # big endian
majorVersion: H
minorVersion: H
tupleVariationCount: H
offsetToData: H
"""
# Size in bytes of the packed header above.
CVAR_HEADER_SIZE = sstruct.calcsize(CVAR_HEADER_FORMAT)
class table__c_v_a_r(DefaultTable.DefaultTable):
    """The 'cvar' table: CVT value variations for variable fonts.

    self.variations holds TupleVariation objects, one per variation tuple.
    """

    dependencies = ["cvt ", "fvar"]

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.majorVersion, self.minorVersion = 1, 0
        self.variations = []

    def compile(self, ttFont, useSharedPoints=False):
        # Only variations that actually change something get stored.
        effective = [var for var in self.variations if var.hasImpact()]
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        tupleVariationCount, tuples, data = compileTupleVariationStore(
            variations=effective,
            pointCount=len(ttFont["cvt "].values),
            axisTags=axisTags,
            sharedTupleIndices={},
            useSharedPoints=useSharedPoints)
        header = sstruct.pack(CVAR_HEADER_FORMAT, {
            "majorVersion": self.majorVersion,
            "minorVersion": self.minorVersion,
            "tupleVariationCount": tupleVariationCount,
            # The serialized data begins right after the header and tuples.
            "offsetToData": CVAR_HEADER_SIZE + len(tuples),
        })
        return b''.join([header, tuples, data])

    def decompile(self, data, ttFont):
        header = {}
        sstruct.unpack(CVAR_HEADER_FORMAT, data[0:CVAR_HEADER_SIZE], header)
        self.majorVersion = header["majorVersion"]
        self.minorVersion = header["minorVersion"]
        assert self.majorVersion == 1, self.majorVersion
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        self.variations = decompileTupleVariationStore(
            tableTag=self.tableTag, axisTags=axisTags,
            tupleVariationCount=header["tupleVariationCount"],
            pointCount=len(ttFont["cvt "].values), sharedTuples=None,
            data=data, pos=CVAR_HEADER_SIZE, dataPos=header["offsetToData"])

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":
            self.majorVersion = int(attrs.get("major", "1"))
            self.minorVersion = int(attrs.get("minor", "0"))
        elif name == "tuple":
            # A fresh TupleVariation over every CVT value; children fill it in.
            valueCount = len(ttFont["cvt "].values)
            var = TupleVariation({}, [None] * valueCount)
            self.variations.append(var)
            for child in content:
                if isinstance(child, tuple):
                    childName, childAttrs, childContent = child
                    var.fromXML(childName, childAttrs, childContent)

    def toXML(self, writer, ttFont):
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        writer.simpletag("version",
            major=self.majorVersion, minor=self.minorVersion)
        writer.newline()
        for var in self.variations:
            var.toXML(writer, axisTags)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_c_v_a_r.py",
"copies": "4",
"size": "3294",
"license": "apache-2.0",
"hash": 5658607622294245000,
"line_mean": 39.1707317073,
"line_max": 81,
"alpha_frac": 0.6326654523,
"autogenerated": false,
"ratio": 3.8346915017462164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 82
} |
from fontTools.misc.py23 import bytesjoin, strjoin
from fontTools.misc import sstruct
from fontTools.misc.textTools import readHex
from fontTools.ttLib import TTLibError
from . import DefaultTable
# Apple's documentation of 'meta':
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6meta.html
# sstruct format for the fixed 'meta' table header.
META_HEADER_FORMAT = """
> # big endian
version: L
flags: L
dataOffset: L
numDataMaps: L
"""
# sstruct format for one DataMap record (tag + offset/length of its payload).
DATA_MAP_FORMAT = """
> # big endian
tag: 4s
dataOffset: L
dataLength: L
"""
class table__m_e_t_a(DefaultTable.DefaultTable):
# The 'meta' (metadata) table: a tag -> payload dictionary stored in
# self.data.  'dlng'/'slng' payloads are treated as UTF-8 text; every other
# tag is kept as raw bytes.
def __init__(self, tag=None):
DefaultTable.DefaultTable.__init__(self, tag)
self.data = {}
def decompile(self, data, ttFont):
headerSize = sstruct.calcsize(META_HEADER_FORMAT)
header = sstruct.unpack(META_HEADER_FORMAT, data[0 : headerSize])
if header["version"] != 1:
raise TTLibError("unsupported 'meta' version %d" %
header["version"])
dataMapSize = sstruct.calcsize(DATA_MAP_FORMAT)
for i in range(header["numDataMaps"]):
dataMapOffset = headerSize + i * dataMapSize
dataMap = sstruct.unpack(
DATA_MAP_FORMAT,
data[dataMapOffset : dataMapOffset + dataMapSize])
# NOTE(review): 'tag' comes from a "4s" sstruct field; the str
# comparison below assumes sstruct yields str, not bytes -- confirm.
tag = dataMap["tag"]
offset = dataMap["dataOffset"]
self.data[tag] = data[offset : offset + dataMap["dataLength"]]
if tag in ["dlng", "slng"]:
# Language tags are text, decode for convenient manipulation.
self.data[tag] = self.data[tag].decode("utf-8")
def compile(self, ttFont):
keys = sorted(self.data.keys())
headerSize = sstruct.calcsize(META_HEADER_FORMAT)
# Payload data starts right after the header and all DataMap records.
dataOffset = headerSize + len(keys) * sstruct.calcsize(DATA_MAP_FORMAT)
header = sstruct.pack(META_HEADER_FORMAT, {
"version": 1,
"flags": 0,
"dataOffset": dataOffset,
"numDataMaps": len(keys)
})
dataMaps = []
dataBlocks = []
for tag in keys:
if tag in ["dlng", "slng"]:
data = self.data[tag].encode("utf-8")
else:
data = self.data[tag]
dataMaps.append(sstruct.pack(DATA_MAP_FORMAT, {
"tag": tag,
"dataOffset": dataOffset,
"dataLength": len(data)
}))
dataBlocks.append(data)
# Payloads are laid out back to back in tag order.
dataOffset += len(data)
return bytesjoin([header] + dataMaps + dataBlocks)
def toXML(self, writer, ttFont):
for tag in sorted(self.data.keys()):
if tag in ["dlng", "slng"]:
writer.begintag("text", tag=tag)
writer.newline()
writer.write(self.data[tag])
writer.newline()
writer.endtag("text")
writer.newline()
else:
writer.begintag("hexdata", tag=tag)
writer.newline()
data = self.data[tag]
# Purely printable-ASCII payloads get a readable comment next to the hex.
if min(data) >= 0x20 and max(data) <= 0x7E:
writer.comment("ascii: " + data.decode("ascii"))
writer.newline()
writer.dumphex(data)
writer.endtag("hexdata")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
if name == "hexdata":
self.data[attrs["tag"]] = readHex(content)
elif name == "text" and attrs["tag"] in ["dlng", "slng"]:
self.data[attrs["tag"]] = strjoin(content).strip()
else:
raise TTLibError("can't handle '%s' element" % name)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_m_e_t_a.py",
"copies": "5",
"size": "3649",
"license": "apache-2.0",
"hash": 612219520178398200,
"line_mean": 34.7745098039,
"line_max": 81,
"alpha_frac": 0.5374075089,
"autogenerated": false,
"ratio": 3.890191897654584,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6927599406554584,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import bytesjoin, strjoin, tobytes, tostr
from fontTools.misc import sstruct
from . import DefaultTable
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
from io import BytesIO
import struct
import logging
log = logging.getLogger(__name__)
__doc__="""
Compiles/decompiles version 0 and 1 SVG tables from/to XML.
Version 1 is the first SVG definition, implemented in Mozilla before Aug 2013, now deprecated.
This module will decompile this correctly, but will compile a version 1 table
only if you add the secret element "<version1/>" to the SVG element in the TTF file.
Version 0 is the joint Adobe-Mozilla proposal, which supports color palettes.
The XML format is:
<SVG>
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[ <complete SVG doc> ]]
</svgDoc>
...
<svgDoc endGlyphID="n" startGlyphID="m">
<![CDATA[ <complete SVG doc> ]]
</svgDoc>
<colorPalettes>
<colorParamUINameID>n</colorParamUINameID>
...
<colorParamUINameID>m</colorParamUINameID>
<colorPalette uiNameID="n">
<colorRecord red="<int>" green="<int>" blue="<int>" alpha="<int>" />
...
<colorRecord red="<int>" green="<int>" blue="<int>" alpha="<int>" />
</colorPalette>
...
<colorPalette uiNameID="m">
<colorRecord red="<int> green="<int>" blue="<int>" alpha="<int>" />
...
<colorRecord red=<int>" green="<int>" blue="<int>" alpha="<int>" />
</colorPalette>
</colorPalettes>
</SVG>
Color values must be less than 256.
The number of color records in each </colorPalette> must be the same as
the number of <colorParamUINameID> elements.
"""
# Short aliases for the ElementTree API used throughout this module.
XML = ET.XML
XMLElement = ET.Element
xmlToString = ET.tostring
# sstruct format for the version 0 (standardized) SVG table header.
SVG_format_0 = """
> # big endian
version: H
offsetToSVGDocIndex: L
offsetToColorPalettes: L
"""
SVG_format_0Size = sstruct.calcsize(SVG_format_0)
# sstruct format for the deprecated version 1 (Mozilla) header.
SVG_format_1 = """
> # big endian
version: H
numIndicies: H
"""
SVG_format_1Size = sstruct.calcsize(SVG_format_1)
# sstruct format for one entry of the SVG Document Index.
doc_index_entry_format_0 = """
> # big endian
startGlyphID: H
endGlyphID: H
svgDocOffset: L
svgDocLength: L
"""
doc_index_entry_format_0Size = sstruct.calcsize(doc_index_entry_format_0)
# sstruct format for one RGBA color record (one byte per channel).
colorRecord_format_0 = """
red: B
green: B
blue: B
alpha: B
"""
class table_S_V_G_(DefaultTable.DefaultTable):
def __init__(self, tag=None):
DefaultTable.DefaultTable.__init__(self, tag)
self.colorPalettes = None
def decompile(self, data, ttFont):
self.docList = None
self.colorPalettes = None
pos = 0
self.version = struct.unpack(">H", data[pos:pos+2])[0]
if self.version == 1:
# This is pre-standardization version of the table; and obsolete. But we decompile it for now.
# https://wiki.mozilla.org/SVGOpenTypeFonts
self.decompile_format_1(data, ttFont)
else:
if self.version != 0:
log.warning(
"Unknown SVG table version '%s'. Decompiling as version 0.", self.version)
# This is the standardized version of the table; and current.
# https://www.microsoft.com/typography/otspec/svg.htm
self.decompile_format_0(data, ttFont)
def decompile_format_0(self, data, ttFont):
dummy, data2 = sstruct.unpack2(SVG_format_0, data, self)
# read in SVG Documents Index
self.decompileEntryList(data)
# read in colorPalettes table.
self.colorPalettes = colorPalettes = ColorPalettes()
pos = self.offsetToColorPalettes
if pos > 0:
colorPalettes.numColorParams = numColorParams = struct.unpack(">H", data[pos:pos+2])[0]
if numColorParams > 0:
colorPalettes.colorParamUINameIDs = colorParamUINameIDs = []
pos = pos + 2
for i in range(numColorParams):
nameID = struct.unpack(">H", data[pos:pos+2])[0]
colorParamUINameIDs.append(nameID)
pos = pos + 2
colorPalettes.numColorPalettes = numColorPalettes = struct.unpack(">H", data[pos:pos+2])[0]
pos = pos + 2
if numColorPalettes > 0:
colorPalettes.colorPaletteList = colorPaletteList = []
for i in range(numColorPalettes):
colorPalette = ColorPalette()
colorPaletteList.append(colorPalette)
colorPalette.uiNameID = struct.unpack(">H", data[pos:pos+2])[0]
pos = pos + 2
colorPalette.paletteColors = paletteColors = []
for j in range(numColorParams):
colorRecord, colorPaletteData = sstruct.unpack2(colorRecord_format_0, data[pos:], ColorRecord())
paletteColors.append(colorRecord)
pos += 4
def decompile_format_1(self, data, ttFont):
self.offsetToSVGDocIndex = 2
self.decompileEntryList(data)
def decompileEntryList(self, data):
	"""Read the SVG Document Index and the documents it points to,
	populating self.docList with [doc, startGlyphID, endGlyphID] items."""
	# data starts with the first entry of the entry list.
	pos = subTableStart = self.offsetToSVGDocIndex
	self.numEntries = numEntries = struct.unpack(">H", data[pos:pos+2])[0]
	pos += 2
	if self.numEntries > 0:
		data2 = data[pos:]
		self.docList = []
		self.entries = entries = []
		for i in range(self.numEntries):
			docIndexEntry, data2 = sstruct.unpack2(doc_index_entry_format_0, data2, DocumentIndexEntry())
			entries.append(docIndexEntry)
		for entry in entries:
			# Document offsets are relative to the start of the doc index.
			start = entry.svgDocOffset + subTableStart
			end = start + entry.svgDocLength
			doc = data[start:end]
			# The gzip magic number marks a compressed (SVGZ) document.
			if doc.startswith(b"\x1f\x8b"):
				import gzip
				bytesIO = BytesIO(doc)
				with gzip.GzipFile(None, "r", fileobj=bytesIO) as gunzipper:
					doc = gunzipper.read()
				# Remember so compile() re-compresses on round trip.
				self.compressed = True
				del bytesIO
			doc = tostr(doc, "utf_8")
			self.docList.append( [doc, entry.startGlyphID, entry.endGlyphID] )
def compile(self, ttFont):
	"""Serialize the SVG table; format 1 is used only when the deprecated
	'version1' marker attribute is present."""
	serializer = self.compileFormat1 if hasattr(self, "version1") else self.compileFormat0
	return serializer(ttFont)
def compileFormat0(self, ttFont):
	"""Serialize a version-0 SVG table: header, SVG Documents Index, the
	documents themselves, then the optional color-palettes subtable."""
	version = 0
	offsetToSVGDocIndex = SVG_format_0Size # I start the SVGDocIndex right after the header.
	# get SVGDoc info.
	docList = []
	entryList = []
	numEntries = len(self.docList)
	datum = struct.pack(">H",numEntries)
	entryList.append(datum)
	# Document data starts after the entry count and all index entries.
	curOffset = len(datum) + doc_index_entry_format_0Size*numEntries
	for doc, startGlyphID, endGlyphID in self.docList:
		docOffset = curOffset
		docBytes = tobytes(doc, encoding="utf_8")
		# Re-compress documents that were compressed on disk, unless the
		# caller already supplied gzipped bytes.
		if getattr(self, "compressed", False) and not docBytes.startswith(b"\x1f\x8b"):
			import gzip
			bytesIO = BytesIO()
			with gzip.GzipFile(None, "w", fileobj=bytesIO) as gzipper:
				gzipper.write(docBytes)
			gzipped = bytesIO.getvalue()
			# Only keep the compressed form if it is actually smaller.
			if len(gzipped) < len(docBytes):
				docBytes = gzipped
			del gzipped, bytesIO
		docLength = len(docBytes)
		curOffset += docLength
		entry = struct.pack(">HHLL", startGlyphID, endGlyphID, docOffset, docLength)
		entryList.append(entry)
		docList.append(docBytes)
	entryList.extend(docList)
	svgDocData = bytesjoin(entryList)
	# get colorpalette info.
	if self.colorPalettes is None:
		# Offset 0 signals "no color palettes".
		offsetToColorPalettes = 0
		palettesData = ""
	else:
		offsetToColorPalettes = SVG_format_0Size + len(svgDocData)
		dataList = []
		numColorParams = len(self.colorPalettes.colorParamUINameIDs)
		datum = struct.pack(">H", numColorParams)
		dataList.append(datum)
		for uiNameId in self.colorPalettes.colorParamUINameIDs:
			datum = struct.pack(">H", uiNameId)
			dataList.append(datum)
		numColorPalettes = len(self.colorPalettes.colorPaletteList)
		datum = struct.pack(">H", numColorPalettes)
		dataList.append(datum)
		for colorPalette in self.colorPalettes.colorPaletteList:
			datum = struct.pack(">H", colorPalette.uiNameID)
			dataList.append(datum)
			for colorRecord in colorPalette.paletteColors:
				data = struct.pack(">BBBB", colorRecord.red, colorRecord.green, colorRecord.blue, colorRecord.alpha)
				dataList.append(data)
		palettesData = bytesjoin(dataList)
	header = struct.pack(">HLL", version, offsetToSVGDocIndex, offsetToColorPalettes)
	data = [header, svgDocData, palettesData]
	data = bytesjoin(data)
	return data
def compileFormat1(self, ttFont):
	"""Serialize as a (deprecated) version-1 SVG table: a small header,
	the document index entries, then the raw SVG documents."""
	version = 1
	numEntries = len(self.docList)
	pieces = [struct.pack(">HH", version, numEntries)]
	docChunks = []
	# First document starts right after the header and all index entries.
	nextOffset = SVG_format_1Size + doc_index_entry_format_0Size * numEntries
	for doc, startGlyphID, endGlyphID in self.docList:
		encoded = tobytes(doc, encoding="utf_8")
		pieces.append(struct.pack(">HHLL", startGlyphID, endGlyphID, nextOffset, len(encoded)))
		docChunks.append(encoded)
		nextOffset += len(encoded)
	pieces.extend(docChunks)
	return bytesjoin(pieces)
def toXML(self, writer, ttFont):
	"""Write one <svgDoc> CDATA element per document, then the optional
	<colorPalettes> subtree."""
	writer.newline()
	for doc, startGID, endGID in self.docList:
		writer.begintag("svgDoc", startGlyphID=startGID, endGlyphID=endGID)
		writer.newline()
		writer.writecdata(doc)
		writer.newline()
		writer.endtag("svgDoc")
		writer.newline()
	if (self.colorPalettes is not None) and (self.colorPalettes.numColorParams is not None):
		writer.begintag("colorPalettes")
		writer.newline()
		for uiNameID in self.colorPalettes.colorParamUINameIDs:
			writer.begintag("colorParamUINameID")
			writer._writeraw(str(uiNameID))
			writer.endtag("colorParamUINameID")
			writer.newline()
		for colorPalette in self.colorPalettes.colorPaletteList:
			writer.begintag("colorPalette", [("uiNameID", str(colorPalette.uiNameID))])
			writer.newline()
			for colorRecord in colorPalette.paletteColors:
				# Channels are written as hex literals (e.g. 0xff).
				colorAttributes = [
						("red", hex(colorRecord.red)),
						("green", hex(colorRecord.green)),
						("blue", hex(colorRecord.blue)),
						("alpha", hex(colorRecord.alpha)),
					]
				writer.begintag("colorRecord", colorAttributes)
				writer.endtag("colorRecord")
				writer.newline()
			writer.endtag("colorPalette")
			writer.newline()
		writer.endtag("colorPalettes")
		writer.newline()
def fromXML(self, name, attrs, content, ttFont):
	"""Rebuild docList and colorPalettes from TTX XML elements."""
	if name == "svgDoc":
		if not hasattr(self, "docList"):
			self.docList = []
		doc = strjoin(content)
		doc = doc.strip()
		startGID = int(attrs["startGlyphID"])
		endGID = int(attrs["endGlyphID"])
		self.docList.append( [doc, startGID, endGID] )
	elif name == "colorPalettes":
		self.colorPalettes = ColorPalettes()
		self.colorPalettes.fromXML(name, attrs, content, ttFont)
		# An empty <colorPalettes> element means "no palettes at all".
		if self.colorPalettes.numColorParams == 0:
			self.colorPalettes = None
	else:
		log.warning("Unknown %s %s", name, content)
class DocumentIndexEntry(object):
	"""One record of the SVG Document Index: a glyph-ID range plus the
	offset and length of the SVG document covering that range."""
	def __init__(self):
		# All four fields are populated during decompilation.
		self.startGlyphID = None  # USHORT
		self.endGlyphID = None  # USHORT
		self.svgDocOffset = None  # ULONG
		self.svgDocLength = None  # ULONG
	def __repr__(self):
		return "startGlyphID: {}, endGlyphID: {}, svgDocOffset: {}, svgDocLength: {}".format(
			self.startGlyphID, self.endGlyphID, self.svgDocOffset, self.svgDocLength)
class ColorPalettes(object):
	"""Container for the SVG table's color-palette definitions."""
	def __init__(self):
		self.numColorParams = None  # USHORT
		self.colorParamUINameIDs = []  # list of name table name ID values that provide UI description of each color palette.
		self.numColorPalettes = None  # USHORT
		self.colorPaletteList = []  # list of ColorPalette records
	def fromXML(self, name, attrs, content, ttFont):
		"""Parse child elements; counts are derived from what was parsed."""
		for element in content:
			# Plain strings are inter-element whitespace; skip them.
			if not isinstance(element, tuple):
				continue
			name, attrib, content = element
			if name == "colorParamUINameID":
				self.colorParamUINameIDs.append(int(content[0]))
			elif name == "colorPalette":
				palette = ColorPalette()
				self.colorPaletteList.append(palette)
				palette.fromXML(name, attrib, content, ttFont)
		self.numColorParams = len(self.colorParamUINameIDs)
		self.numColorPalettes = len(self.colorPaletteList)
		# Every palette must define exactly one color per color parameter.
		for palette in self.colorPaletteList:
			if len(palette.paletteColors) != self.numColorParams:
				raise ValueError("Number of color records in a colorPalette ('%s') does not match the number of colorParamUINameIDs elements ('%s')." % (len(palette.paletteColors), self.numColorParams))
class ColorPalette(object):
	"""A single named palette: a UI name ID plus one ColorRecord per
	color parameter."""
	def __init__(self):
		self.uiNameID = None  # USHORT. name table ID that describes user interface strings associated with this color palette.
		self.paletteColors = []  # list of ColorRecords
	def fromXML(self, name, attrs, content, ttFont):
		"""Parse a <colorPalette> element and its <colorRecord> children."""
		self.uiNameID = int(attrs["uiNameID"])
		for element in content:
			if isinstance(element, type("")):
				continue
			name, attrib, content = element
			if name == "colorRecord":
				colorRecord = ColorRecord()
				self.paletteColors.append(colorRecord)
				# int(x, 0) accepts both decimal and 0x-prefixed hex, which
				# covers everything toXML() emits; unlike the eval() used
				# previously it cannot execute arbitrary expressions coming
				# from an untrusted TTX file.
				colorRecord.red = int(attrib["red"], 0)
				colorRecord.green = int(attrib["green"], 0)
				colorRecord.blue = int(attrib["blue"], 0)
				colorRecord.alpha = int(attrib["alpha"], 0)
class ColorRecord(object):
	"""One palette color; each channel is a single byte (0-255)."""
	def __init__(self):
		# Default to fully opaque white.
		self.red = self.green = self.blue = self.alpha = 255
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/S_V_G_.py",
"copies": "5",
"size": "12818",
"license": "apache-2.0",
"hash": 6800704520710339000,
"line_mean": 32.207253886,
"line_max": 195,
"alpha_frac": 0.6949602122,
"autogenerated": false,
"ratio": 3.0709151892668904,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.626587540146689,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import bytesjoin, strjoin, tobytes, tostr
from fontTools.misc.textTools import safeEval
from fontTools.misc import sstruct
from . import DefaultTable
import base64
DSIG_HeaderFormat = """
> # big endian
ulVersion: L
usNumSigs: H
usFlag: H
"""
# followed by an array of usNumSigs DSIG_Signature records
DSIG_SignatureFormat = """
> # big endian
ulFormat: L
ulLength: L # length includes DSIG_SignatureBlock header
ulOffset: L
"""
# followed by an array of usNumSigs DSIG_SignatureBlock records,
# each followed immediately by the pkcs7 bytes
DSIG_SignatureBlockFormat = """
> # big endian
usReserved1: H
usReserved2: H
cbSignature: l # length of following raw pkcs7 data
"""
#
# NOTE
# the DSIG table format allows for SignatureBlocks residing
# anywhere in the table and possibly in a different order as
# listed in the array after the first table header
#
# this implementation does not keep track of any gaps and/or data
# before or after the actual signature blocks while decompiling,
# and puts them in the same physical order as listed in the header
# on compilation with no padding whatsoever.
#
class table_D_S_I_G_(DefaultTable.DefaultTable):
	"""Digital Signature table. Note that any recompilation invalidates
	the signatures, so this table is mostly of archival interest."""
	def decompile(self, data, ttFont):
		"""Parse the header, the signature record array, and each
		signature block the records point at."""
		dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self)
		assert self.ulVersion == 1, "DSIG ulVersion must be 1"
		assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0"
		self.signatureRecords = sigrecs = []
		for n in range(self.usNumSigs):
			sigrec, newData = sstruct.unpack2(DSIG_SignatureFormat, newData, SignatureRecord())
			assert sigrec.ulFormat == 1, "DSIG signature record #%d ulFormat must be 1" % n
			sigrecs.append(sigrec)
		# Signature blocks may live anywhere in the table, so seek to each
		# one via its ulOffset. (The asserts below previously reported the
		# stale loop index from the loop above and misspelled "usReserved".)
		for n, sigrec in enumerate(sigrecs):
			dummy, newData = sstruct.unpack2(DSIG_SignatureBlockFormat, data[sigrec.ulOffset:], sigrec)
			assert sigrec.usReserved1 == 0, "DSIG signature record #%d usReserved1 must be 0" % n
			assert sigrec.usReserved2 == 0, "DSIG signature record #%d usReserved2 must be 0" % n
			sigrec.pkcs7 = newData[:sigrec.cbSignature]
	def compile(self, ttFont):
		"""Serialize header + signature records, then the signature blocks
		in the same order, recomputing lengths and offsets."""
		packed = sstruct.pack(DSIG_HeaderFormat, self)
		headers = [packed]
		offset = len(packed) + self.usNumSigs * sstruct.calcsize(DSIG_SignatureFormat)
		data = []
		for sigrec in self.signatureRecords:
			# first pack signature block
			sigrec.cbSignature = len(sigrec.pkcs7)
			packed = sstruct.pack(DSIG_SignatureBlockFormat, sigrec) + sigrec.pkcs7
			data.append(packed)
			# update redundant length field
			sigrec.ulLength = len(packed)
			# update running table offset
			sigrec.ulOffset = offset
			headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec))
			offset += sigrec.ulLength
		if offset % 2:
			# Pad to even bytes
			data.append(b'\0')
		return bytesjoin(headers+data)
	def toXML(self, xmlWriter, ttFont):
		xmlWriter.comment("note that the Digital Signature will be invalid after recompilation!")
		xmlWriter.newline()
		xmlWriter.simpletag("tableHeader", version=self.ulVersion, numSigs=self.usNumSigs, flag="0x%X" % self.usFlag)
		for sigrec in self.signatureRecords:
			xmlWriter.newline()
			sigrec.toXML(xmlWriter, ttFont)
		xmlWriter.newline()
	def fromXML(self, name, attrs, content, ttFont):
		if name == "tableHeader":
			self.signatureRecords = []
			self.ulVersion = safeEval(attrs["version"])
			self.usNumSigs = safeEval(attrs["numSigs"])
			self.usFlag = safeEval(attrs["flag"])
			return
		if name == "SignatureRecord":
			sigrec = SignatureRecord()
			sigrec.fromXML(name, attrs, content, ttFont)
			self.signatureRecords.append(sigrec)
# PEM armor lines and blank lines carry no base64 payload.
_PEM_NON_PAYLOAD = frozenset(["-----BEGIN PKCS7-----", "-----END PKCS7-----", ""])

def pem_spam(l):
	"""Return True if line *l* is base64 payload (not PEM armor or blank)."""
	return l.strip() not in _PEM_NON_PAYLOAD
def b64encode(b):
	"""Base64-encode *b*, wrapping the output at 76 characters per line.
	Every emitted line (including the last) ends with a newline; empty
	input yields an empty string."""
	encoded = base64.b64encode(b)
	pieces = []
	for start in range(0, len(encoded), 76):
		pieces.append(tostr(encoded[start:start + 76]))
		pieces.append('\n')
	return strjoin(pieces)
class SignatureRecord(object):
	# Holds one DSIG signature: header fields plus the raw pkcs7 bytes.
	def __repr__(self):
		return "<%s: %s>" % (self.__class__.__name__, self.__dict__)
	def toXML(self, writer, ttFont):
		"""Write the signature as a PEM-armored base64 block."""
		writer.begintag(self.__class__.__name__, format=self.ulFormat)
		writer.newline()
		writer.write_noindent("-----BEGIN PKCS7-----\n")
		writer.write_noindent(b64encode(self.pkcs7))
		writer.write_noindent("-----END PKCS7-----\n")
		writer.endtag(self.__class__.__name__)
	def fromXML(self, name, attrs, content, ttFont):
		"""Parse attributes and the PEM-armored payload back into bytes."""
		self.ulFormat = safeEval(attrs["format"])
		self.usReserved1 = safeEval(attrs.get("reserved1", "0"))
		self.usReserved2 = safeEval(attrs.get("reserved2", "0"))
		# Drop armor/blank lines, join the rest, and base64-decode.
		self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content))))
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/D_S_I_G_.py",
"copies": "5",
"size": "4603",
"license": "apache-2.0",
"hash": 4048551331028778500,
"line_mean": 34.4076923077,
"line_max": 111,
"alpha_frac": 0.7091027591,
"autogenerated": false,
"ratio": 3.091336467427804,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6300439226527804,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import bytesjoin, tobytes
from fontTools.misc.textTools import safeEval
from . import DefaultTable
import struct
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ltag.html
class table__l_t_a_g(DefaultTable.DefaultTable):
	"""Language Tag ('ltag') table: a deduplicated list of language tags
	that other Apple tables reference by integer index."""
	def __init__(self, tag=None):
		DefaultTable.DefaultTable.__init__(self, tag)
		self.version, self.flags = 1, 0
		self.tags = []
	def addTag(self, tag):
		"""Add 'tag' to the list of language tags if not already there.
		Returns the integer index of 'tag' in the list of all tags.
		"""
		if tag not in self.tags:
			self.tags.append(tag)
		return self.tags.index(tag)
	def decompile(self, data, ttFont):
		self.version, self.flags, numTags = struct.unpack(">LLL", data[:12])
		assert self.version == 1
		self.tags = []
		# Each 4-byte record holds (offset, length) of a tag string,
		# with the offset measured from the start of the table.
		for recordStart in range(12, 12 + numTags * 4, 4):
			offset, length = struct.unpack(">HH", data[recordStart:recordStart + 4])
			self.tags.append(data[offset:offset + length].decode("ascii"))
	def compile(self, ttFont):
		parts = [struct.pack(">LLL", self.version, self.flags, len(self.tags))]
		stringPool = ""
		headerSize = 12 + len(self.tags) * 4
		for tag in self.tags:
			# Reuse an existing occurrence in the pool where possible.
			poolOffset = stringPool.find(tag)
			if poolOffset < 0:
				poolOffset = len(stringPool)
				stringPool = stringPool + tag
			parts.append(struct.pack(">HH", poolOffset + headerSize, len(tag)))
		parts.append(tobytes(stringPool))
		return bytesjoin(parts)
	def toXML(self, writer, ttFont):
		for attrName in ("version", "flags"):
			writer.simpletag(attrName, value=getattr(self, attrName))
			writer.newline()
		for tag in self.tags:
			writer.simpletag("LanguageTag", tag=tag)
			writer.newline()
	def fromXML(self, name, attrs, content, ttFont):
		if not hasattr(self, "tags"):
			self.tags = []
		if name == "LanguageTag":
			self.tags.append(attrs["tag"])
		elif "value" in attrs:
			setattr(self, name, safeEval(attrs["value"]))
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_l_t_a_g.py",
"copies": "5",
"size": "1952",
"license": "apache-2.0",
"hash": 65653451998629304,
"line_mean": 29.5,
"line_max": 81,
"alpha_frac": 0.6854508197,
"autogenerated": false,
"ratio": 2.9892802450229707,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.617473106472297,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import bytesjoin, tobytes, tostr
from fontTools.misc.fixedTools import (
fixedToFloat as fi2fl,
floatToFixed as fl2fi,
floatToFixedToStr as fl2str,
strToFixedToFloat as str2fl,
ensureVersionIsLong as fi2ve,
versionToFixed as ve2fi,
)
from fontTools.misc.textTools import pad, safeEval
from fontTools.ttLib import getSearchRange
from .otBase import (CountReference, FormatSwitchingBaseTable,
OTTableReader, OTTableWriter, ValueRecordFactory)
from .otTables import (lookupTypes, AATStateTable, AATState, AATAction,
ContextualMorphAction, LigatureMorphAction,
InsertionMorphAction, MorxSubtable, VariableFloat,
VariableInt, ExtendMode as _ExtendMode,
CompositeMode as _CompositeMode)
from itertools import zip_longest
from functools import partial
import struct
import logging
log = logging.getLogger(__name__)
istuple = lambda t: isinstance(t, tuple)
def buildConverters(tableSpec, tableNamespace):
	"""Given a table spec from otData.py, build a converter object for each
	field of the table. This is called for each table in otData.py, and
	the results are assigned to the corresponding class in otTables.py."""
	converters = []
	convertersByName = {}
	for tp, name, repeat, aux, descr in tableSpec:
		tableName = name
		# Pick a converter class based on the field name first (special
		# cases), then on the declared type.
		if name.startswith("ValueFormat"):
			assert tp == "uint16"
			converterClass = ValueFormat
		elif name.endswith("Count") or name in ("StructLength", "MorphType"):
			converterClass = {
				"uint8": ComputedUInt8,
				"uint16": ComputedUShort,
				"uint32": ComputedULong,
			}[tp]
		elif name == "SubTable":
			converterClass = SubTable
		elif name == "ExtSubTable":
			converterClass = ExtSubTable
		elif name == "SubStruct":
			converterClass = SubStruct
		elif name == "FeatureParams":
			converterClass = FeatureParams
		elif name in ("CIDGlyphMapping", "GlyphCIDMapping"):
			converterClass = StructWithLength
		else:
			if not tp in converterMapping and '(' not in tp:
				tableName = tp
				converterClass = Struct
			else:
				# Parametrized types like "OffsetTo(AType)" are evaluated
				# against the table namespace plus the converter mapping.
				converterClass = eval(tp, tableNamespace, converterMapping)
		conv = converterClass(name, repeat, aux)
		if conv.tableClass:
			# A "template" such as OffsetTo(AType) knows the table class already
			tableClass = conv.tableClass
		elif tp in ('MortChain', 'MortSubtable', 'MorxChain'):
			tableClass = tableNamespace.get(tp)
		else:
			tableClass = tableNamespace.get(tableName)
		if not conv.tableClass:
			conv.tableClass = tableClass
		if name in ["SubTable", "ExtSubTable", "SubStruct"]:
			conv.lookupTypes = tableNamespace['lookupTypes']
			# also create reverse mapping
			for t in conv.lookupTypes.values():
				for cls in t.values():
					convertersByName[cls.__name__] = Table(name, repeat, aux, cls)
		if name == "FeatureParams":
			conv.featureParamTypes = tableNamespace['featureParamTypes']
			conv.defaultFeatureParams = tableNamespace['FeatureParams']
			for cls in conv.featureParamTypes.values():
				convertersByName[cls.__name__] = Table(name, repeat, aux, cls)
		converters.append(conv)
		assert name not in convertersByName, name
		convertersByName[name] = conv
	return converters, convertersByName
class _MissingItem(tuple):
	# Sentinel stored in a _LazyList slot that has not been read yet;
	# carries the slot's index as its single element.
	__slots__ = ()
# UserList lives in `collections` on Python 3; fall back to the old
# top-level module for legacy Python 2.
try:
	from collections import UserList
except ImportError:
	from UserList import UserList
class _LazyList(UserList):
	# List whose items are read from `self.reader` on first access.
	# Unread slots hold _MissingItem sentinels carrying their index.
	def __getslice__(self, i, j):
		return self.__getitem__(slice(i, j))
	def __getitem__(self, k):
		if isinstance(k, slice):
			indices = range(*k.indices(len(self)))
			return [self[i] for i in indices]
		item = self.data[k]
		if isinstance(item, _MissingItem):
			# Seek to the record's position and materialize it in place.
			self.reader.seek(self.pos + item[0] * self.recordSize)
			item = self.conv.read(self.reader, self.font, {})
			self.data[k] = item
		return item
	def __add__(self, other):
		if isinstance(other, _LazyList):
			other = list(other)
		elif isinstance(other, list):
			pass
		else:
			return NotImplemented
		# Concatenation forces full materialization.
		return list(self) + other
	def __radd__(self, other):
		if not isinstance(other, list):
			return NotImplemented
		return other + list(self)
class BaseConverter(object):
	"""Base class for converter objects. Apart from the constructor, this
	is an abstract class."""

	def __init__(self, name, repeat, aux, tableClass=None):
		# name: field name from otData.py; repeat: name of the count field
		# controlling array length (or None); aux: auxiliary converter data.
		self.name = name
		self.repeat = repeat
		self.aux = aux
		self.tableClass = tableClass
		self.isCount = name.endswith("Count") or name in ['DesignAxisRecordSize', 'ValueRecordSize']
		self.isLookupType = name.endswith("LookupType") or name == "MorphType"
		# Propagated fields are shared with sibling/child tables via the
		# reader/writer during (de)compilation.
		self.isPropagated = name in [
			"ClassCount",
			"Class2Count",
			"FeatureTag",
			"SettingsCount",
			"VarRegionCount",
			"MappingCount",
			"RegionAxisCount",
			"DesignAxisCount",
			"DesignAxisRecordSize",
			"AxisValueCount",
			"ValueRecordSize",
			"AxisCount",
			"BaseGlyphRecordCount",
			"LayerRecordCount",
		]

	def readArray(self, reader, font, tableDict, count):
		"""Read an array of values from the reader."""
		# Larger arrays on lazy fonts are wrapped in a _LazyList and
		# decoded on first access; this needs a known fixed record size.
		lazy = font.lazy and count > 8
		if lazy:
			recordSize = self.getRecordSize(reader)
			if recordSize is NotImplemented:
				lazy = False
		if not lazy:
			l = []
			for i in range(count):
				l.append(self.read(reader, font, tableDict))
			return l
		else:
			l = _LazyList()
			l.reader = reader.copy()
			l.pos = l.reader.pos
			l.font = font
			l.conv = self
			l.recordSize = recordSize
			l.extend(_MissingItem([i]) for i in range(count))
			reader.advance(count * recordSize)
			return l

	def getRecordSize(self, reader):
		# Fixed-size converters expose staticSize; others cannot be lazy.
		if hasattr(self, 'staticSize'): return self.staticSize
		return NotImplemented

	def read(self, reader, font, tableDict):
		"""Read a value from the reader."""
		raise NotImplementedError(self)

	def writeArray(self, writer, font, tableDict, values):
		for i, value in enumerate(values):
			self.write(writer, font, tableDict, value, i)

	def write(self, writer, font, tableDict, value, repeatIndex=None):
		"""Write a value to the writer."""
		raise NotImplementedError(self)

	def xmlRead(self, attrs, content, font):
		"""Read a value from XML."""
		raise NotImplementedError(self)

	def xmlWrite(self, xmlWriter, font, value, name, attrs):
		"""Write a value to XML."""
		raise NotImplementedError(self)
class SimpleValue(BaseConverter):
	"""Converter whose XML form is a single 'value' attribute."""
	@staticmethod
	def toString(value):
		return value
	@staticmethod
	def fromString(value):
		return value
	def xmlWrite(self, xmlWriter, font, value, name, attrs):
		xmlWriter.simpletag(name, attrs + [("value", self.toString(value))])
		xmlWriter.newline()
	def xmlRead(self, attrs, content, font):
		return self.fromString(attrs["value"])
class IntValue(SimpleValue):
	"""Integer value; base 0 parsing accepts decimal, 0x hex, etc."""
	@staticmethod
	def fromString(value):
		return int(value, 0)
class Long(IntValue):
	"""Signed 32-bit integer."""
	staticSize = 4
	def read(self, reader, font, tableDict):
		return reader.readLong()
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.writeLong(value)
class ULong(IntValue):
	"""Unsigned 32-bit integer."""
	staticSize = 4
	def read(self, reader, font, tableDict):
		return reader.readULong()
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.writeULong(value)
class Flags32(ULong):
	"""ULong bit field, rendered as a zero-padded hex literal in XML."""
	@staticmethod
	def toString(value):
		return "0x%08X" % value
class Short(IntValue):
	"""Signed 16-bit integer."""
	staticSize = 2
	def read(self, reader, font, tableDict):
		return reader.readShort()
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.writeShort(value)
class UShort(IntValue):
	"""Unsigned 16-bit integer."""
	staticSize = 2
	def read(self, reader, font, tableDict):
		return reader.readUShort()
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.writeUShort(value)
class Int8(IntValue):
	"""Signed 8-bit integer."""
	staticSize = 1
	def read(self, reader, font, tableDict):
		return reader.readInt8()
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.writeInt8(value)
class UInt8(IntValue):
	"""Unsigned 8-bit integer."""
	staticSize = 1
	def read(self, reader, font, tableDict):
		return reader.readUInt8()
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.writeUInt8(value)
class UInt24(IntValue):
	"""Unsigned 24-bit (3-byte) integer."""
	staticSize = 3
	def read(self, reader, font, tableDict):
		return reader.readUInt24()
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.writeUInt24(value)
class ComputedInt(IntValue):
	"""Integer recomputed at compile time (e.g. a count field); written to
	XML only as an informational comment, never parsed back."""
	def xmlWrite(self, xmlWriter, font, value, name, attrs):
		if value is not None:
			xmlWriter.comment("%s=%s" % (name, value))
			xmlWriter.newline()
class ComputedUInt8(ComputedInt, UInt8):
	"""Computed 8-bit count field."""
	pass
class ComputedUShort(ComputedInt, UShort):
	"""Computed 16-bit count field."""
	pass
class ComputedULong(ComputedInt, ULong):
	"""Computed 32-bit count field."""
	pass
class Tag(SimpleValue):
	"""Four-character table/feature tag."""
	staticSize = 4
	def read(self, reader, font, tableDict):
		return reader.readTag()
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.writeTag(value)
class GlyphID(SimpleValue):
	"""Glyph index stored as a 16-bit integer, exposed as a glyph name."""
	staticSize = 2
	typecode = "H"
	def readArray(self, reader, font, tableDict, count):
		glyphOrder = font.getGlyphOrder()
		gids = reader.readArray(self.typecode, self.staticSize, count)
		try:
			# Fast path: direct lookup in the glyph order list.
			l = [glyphOrder[gid] for gid in gids]
		except IndexError:
			# Slower, but will not throw an IndexError on an invalid glyph id.
			l = [font.getGlyphName(gid) for gid in gids]
		return l
	def read(self, reader, font, tableDict):
		return font.getGlyphName(reader.readValue(self.typecode, self.staticSize))
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.writeValue(self.typecode, font.getGlyphID(value))
class GlyphID32(GlyphID):
	"""Glyph index stored as a 32-bit integer."""
	staticSize = 4
	typecode = "L"
class NameID(UShort):
	"""UShort holding a name-table name ID; when writing XML, the resolved
	debug name (if any) is appended as a comment for readability."""
	def xmlWrite(self, xmlWriter, font, value, name, attrs):
		xmlWriter.simpletag(name, attrs + [("value", value)])
		if font and value:
			nameTable = font.get("name")
			if nameTable:
				name = nameTable.getDebugName(value)
				xmlWriter.write(" ")
				if name:
					xmlWriter.comment(name)
				else:
					xmlWriter.comment("missing from name table")
					log.warning("name id %d missing from name table" % value)
		xmlWriter.newline()
class STATFlags(UShort):
	"""UShort bit field of STAT AxisValue flags; known flag names are
	appended as an XML comment."""
	_FLAG_NAMES = (
		(0x01, "OlderSiblingFontAttribute"),
		(0x02, "ElidableAxisValueName"),
	)
	def xmlWrite(self, xmlWriter, font, value, name, attrs):
		xmlWriter.simpletag(name, attrs + [("value", value)])
		names = [label for bit, label in self._FLAG_NAMES if value & bit]
		if names:
			xmlWriter.write(" ")
			xmlWriter.comment(" ".join(names))
		xmlWriter.newline()
class FloatValue(SimpleValue):
	"""Floating-point value."""
	@staticmethod
	def fromString(value):
		return float(value)
class DeciPoints(FloatValue):
	"""Value stored as tenths of a unit in a UShort."""
	staticSize = 2
	def read(self, reader, font, tableDict):
		return reader.readUShort() / 10
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.writeUShort(round(value * 10))
class Fixed(FloatValue):
	"""16.16 fixed-point number stored in a signed 32-bit field."""
	staticSize = 4
	def read(self, reader, font, tableDict):
		return fi2fl(reader.readLong(), 16)
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.writeLong(fl2fi(value, 16))
	@staticmethod
	def fromString(value):
		return str2fl(value, 16)
	@staticmethod
	def toString(value):
		return fl2str(value, 16)
class F2Dot14(FloatValue):
	"""2.14 fixed-point number stored in a signed 16-bit field."""
	staticSize = 2
	def read(self, reader, font, tableDict):
		return fi2fl(reader.readShort(), 14)
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.writeShort(fl2fi(value, 14))
	@staticmethod
	def fromString(value):
		return str2fl(value, 14)
	@staticmethod
	def toString(value):
		return fl2str(value, 14)
class Version(SimpleValue):
	"""Table version as a 16.16 fixed value; only major version 1 is
	accepted on read and write."""
	staticSize = 4
	def read(self, reader, font, tableDict):
		value = reader.readLong()
		assert (value >> 16) == 1, "Unsupported version 0x%08x" % value
		return value
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		# Normalize short version notation to the long 16.16 form first.
		value = fi2ve(value)
		assert (value >> 16) == 1, "Unsupported version 0x%08x" % value
		writer.writeLong(value)
	@staticmethod
	def fromString(value):
		return ve2fi(value)
	@staticmethod
	def toString(value):
		return "0x%08x" % value
	@staticmethod
	def fromFloat(v):
		return fl2fi(v, 16)
class Char64(SimpleValue):
	"""An ASCII string with up to 64 characters.
	Unused character positions are filled with 0x00 bytes.
	Used in Apple AAT fonts in the `gcid` table.
	"""
	staticSize = 64
	def read(self, reader, font, tableDict):
		raw = reader.readData(self.staticSize)
		# Everything from the first NUL byte onwards is padding.
		raw = raw.split(b"\0", 1)[0]
		decoded = tostr(raw, encoding="ascii", errors="replace")
		# Warn if any byte had to be substituted.
		if decoded != tostr(raw, encoding="ascii", errors="ignore"):
			log.warning('replaced non-ASCII characters in "%s"' %
			            decoded)
		return decoded
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		encoded = tobytes(value, encoding="ascii", errors="replace")
		if encoded != tobytes(value, encoding="ascii", errors="ignore"):
			log.warning('replacing non-ASCII characters in "%s"' %
			            value)
		if len(encoded) > self.staticSize:
			log.warning('truncating overlong "%s" to %d bytes' %
			            (value, self.staticSize))
		# Pad with NULs (or truncate) to exactly staticSize bytes.
		writer.writeData(encoded.ljust(self.staticSize, b"\0")[:self.staticSize])
class Struct(BaseConverter):
	"""Converter for an inline struct/table (no offset indirection)."""
	def getRecordSize(self, reader):
		return self.tableClass and self.tableClass.getRecordSize(reader)
	def read(self, reader, font, tableDict):
		table = self.tableClass()
		table.decompile(reader, font)
		return table
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		value.compile(writer, font)
	def xmlWrite(self, xmlWriter, font, value, name, attrs):
		if value is None:
			if attrs:
				# If there are attributes (probably index), then
				# don't drop this even if it's NULL. It will mess
				# up the array indices of the containing element.
				xmlWriter.simpletag(name, attrs + [("empty", 1)])
				xmlWriter.newline()
			else:
				pass # NULL table, ignore
		else:
			value.toXML(xmlWriter, font, attrs, name=name)
	def xmlRead(self, attrs, content, font):
		"""Rebuild the table from XML; for tables without a postRead hook,
		propagated count fields are published via font._propagator while
		children are parsed, then cleaned up again."""
		if "empty" in attrs and safeEval(attrs["empty"]):
			return None
		table = self.tableClass()
		Format = attrs.get("Format")
		if Format is not None:
			table.Format = int(Format)
		noPostRead = not hasattr(table, 'postRead')
		if noPostRead:
			# TODO Cache table.hasPropagated.
			cleanPropagation = False
			for conv in table.getConverters():
				if conv.isPropagated:
					cleanPropagation = True
					if not hasattr(font, '_propagator'):
						font._propagator = {}
					propagator = font._propagator
					assert conv.name not in propagator, (conv.name, propagator)
					setattr(table, conv.name, None)
					# CountReference lets children update the parent's count.
					propagator[conv.name] = CountReference(table.__dict__, conv.name)
		for element in content:
			if isinstance(element, tuple):
				name, attrs, content = element
				table.fromXML(name, attrs, content, font)
			else:
				pass
		table.populateDefaults(propagator=getattr(font, '_propagator', None))
		if noPostRead:
			if cleanPropagation:
				for conv in table.getConverters():
					if conv.isPropagated:
						propagator = font._propagator
						del propagator[conv.name]
						if not propagator:
							del font._propagator
		return table
	def __repr__(self):
		return "Struct of " + repr(self.tableClass)
class StructWithLength(Struct):
	"""Struct whose total byte length is stored in its own 'StructLength'
	field. On write, a DEADBEEF placeholder is emitted first and patched
	with the real length once the struct has been compiled."""
	def read(self, reader, font, tableDict):
		pos = reader.pos
		table = self.tableClass()
		table.decompile(reader, font)
		# Skip any trailing bytes decompile() did not consume.
		reader.seek(pos + table.StructLength)
		return table
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		# Locate the StructLength converter and its item index in the writer.
		for convIndex, conv in enumerate(value.getConverters()):
			if conv.name == "StructLength":
				break
		lengthIndex = len(writer.items) + convIndex
		if isinstance(value, FormatSwitchingBaseTable):
			lengthIndex += 1 # implicit Format field
		deadbeef = {1:0xDE, 2:0xDEAD, 4:0xDEADBEEF}[conv.staticSize]
		before = writer.getDataLength()
		value.StructLength = deadbeef
		value.compile(writer, font)
		length = writer.getDataLength() - before
		lengthWriter = writer.getSubWriter()
		conv.write(lengthWriter, font, tableDict, length)
		# Sanity check: the placeholder must still be in place before patching.
		assert(writer.items[lengthIndex] ==
		       b"\xde\xad\xbe\xef"[:conv.staticSize])
		writer.items[lengthIndex] = lengthWriter.getAllData()
class Table(Struct):
	"""Struct referenced through a 16-bit offset; offset 0 means NULL."""
	staticSize = 2
	def readOffset(self, reader):
		return reader.readUShort()
	def writeNullOffset(self, writer):
		writer.writeUShort(0)
	def read(self, reader, font, tableDict):
		offset = self.readOffset(reader)
		if offset == 0:
			return None
		table = self.tableClass()
		reader = reader.getSubReader(offset)
		if font.lazy:
			# Defer decompilation until the table is actually accessed.
			table.reader = reader
			table.font = font
		else:
			table.decompile(reader, font)
		return table
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		if value is None:
			self.writeNullOffset(writer)
		else:
			# Compile into a sub-writer; the offset is resolved later.
			subWriter = writer.getSubWriter(offsetSize=self.staticSize)
			subWriter.name = self.name
			if repeatIndex is not None:
				subWriter.repeatIndex = repeatIndex
			writer.writeSubTable(subWriter)
			value.compile(subWriter, font)
class LTable(Table):
	"""Table referenced through a 32-bit offset."""
	staticSize = 4
	def readOffset(self, reader):
		return reader.readULong()
	def writeNullOffset(self, writer):
		writer.writeULong(0)
# Table pointed to by a 24-bit, 3-byte long offset
class Table24(Table):
	"""Table referenced through a 24-bit offset."""
	staticSize = 3
	def readOffset(self, reader):
		return reader.readUInt24()
	def writeNullOffset(self, writer):
		writer.writeUInt24(0)
# TODO Clean / merge the SubTable and SubStruct
class SubStruct(Struct):
	"""Inline subtable whose concrete class depends on the lookup type."""
	def getConverter(self, tableType, lookupType):
		# Specialize this converter for the given lookup type.
		tableClass = self.lookupTypes[tableType][lookupType]
		return self.__class__(self.name, self.repeat, self.aux, tableClass)
	def xmlWrite(self, xmlWriter, font, value, name, attrs):
		# The element is named after the concrete subtable class, not 'SubStruct'.
		super(SubStruct, self).xmlWrite(xmlWriter, font, value, None, attrs)
class SubTable(Table):
	"""Offset subtable whose concrete class depends on the lookup type."""
	def getConverter(self, tableType, lookupType):
		# Specialize this converter for the given lookup type.
		tableClass = self.lookupTypes[tableType][lookupType]
		return self.__class__(self.name, self.repeat, self.aux, tableClass)
	def xmlWrite(self, xmlWriter, font, value, name, attrs):
		# The element is named after the concrete subtable class, not 'SubTable'.
		super(SubTable, self).xmlWrite(xmlWriter, font, value, None, attrs)
class ExtSubTable(LTable, SubTable):
	"""Extension subtable: like SubTable but behind a 32-bit offset."""
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.Extension = True # actually, mere presence of the field flags it as an Ext Subtable writer.
		Table.write(self, writer, font, tableDict, value, repeatIndex)
class FeatureParams(Table):
	"""FeatureParams table; its concrete class depends on the feature tag."""
	def getConverter(self, featureTag):
		# Unknown tags fall back to the generic FeatureParams class.
		tableClass = self.featureParamTypes.get(featureTag, self.defaultFeatureParams)
		return self.__class__(self.name, self.repeat, self.aux, tableClass)
class ValueFormat(IntValue):
	"""GPOS ValueFormat field. Reading/writing it also installs a
	ValueRecordFactory on the reader/writer so subsequent ValueRecords
	know which of their optional fields are present."""
	staticSize = 2
	def __init__(self, name, repeat, aux, tableClass=None):
		BaseConverter.__init__(self, name, repeat, aux, tableClass)
		# "ValueFormat1" or "ValueFormat2", depending on the field name.
		self.which = "ValueFormat" + ("2" if name[-1] == "2" else "1")
	def read(self, reader, font, tableDict):
		format = reader.readUShort()
		reader[self.which] = ValueRecordFactory(format)
		return format
	def write(self, writer, font, tableDict, format, repeatIndex=None):
		writer.writeUShort(format)
		writer[self.which] = ValueRecordFactory(format)
class ValueRecord(ValueFormat):
    """Converter for a GPOS ValueRecord.

    Relies on the ValueRecordFactory that the preceding ValueFormat field
    installed on the reader/writer under the key ``self.which``.
    """

    def getRecordSize(self, reader):
        # Each slot present in the factory occupies one uint16.
        return 2 * len(reader[self.which])

    def read(self, reader, font, tableDict):
        return reader[self.which].readValueRecord(reader, font)

    def write(self, writer, font, tableDict, value, repeatIndex=None):
        writer[self.which].writeValueRecord(writer, font, value)

    def xmlWrite(self, xmlWriter, font, value, name, attrs):
        if value is None:
            pass  # NULL table, ignore
        else:
            value.toXML(xmlWriter, font, self.name, attrs)

    def xmlRead(self, attrs, content, font):
        # Imported locally — NOTE(review): presumably to avoid a circular
        # module dependency with otBase; confirm against module layout.
        from .otBase import ValueRecord
        value = ValueRecord()
        value.fromXML(None, attrs, content, font)
        return value
class AATLookup(BaseConverter):
    """Converter for an AAT lookup table mapping glyphs to values.

    Reading returns a dict from glyph name to value. Writing tries the
    supported binary formats (0, 2, 6, 8), estimates each one's encoded
    size, and emits the smallest; format 4 is read but not yet written.
    """

    # unitSize, nUnits, searchRange, entrySelector, rangeShift: 5 uint16s.
    BIN_SEARCH_HEADER_SIZE = 10

    def __init__(self, name, repeat, aux, tableClass):
        BaseConverter.__init__(self, name, repeat, aux, tableClass)
        # Simple values are stored inline; anything else is a subtable.
        if issubclass(self.tableClass, SimpleValue):
            self.converter = self.tableClass(name='Value', repeat=None, aux=None)
        else:
            self.converter = Table(name='Value', repeat=None, aux=None, tableClass=self.tableClass)

    def read(self, reader, font, tableDict):
        # Dispatch on the lookup format stored in the first uint16.
        format = reader.readUShort()
        if format == 0:
            return self.readFormat0(reader, font)
        elif format == 2:
            return self.readFormat2(reader, font)
        elif format == 4:
            return self.readFormat4(reader, font)
        elif format == 6:
            return self.readFormat6(reader, font)
        elif format == 8:
            return self.readFormat8(reader, font)
        else:
            assert False, "unsupported lookup format: %d" % format

    def write(self, writer, font, tableDict, value, repeatIndex=None):
        """Write the lookup using whichever supported format is smallest."""
        values = list(sorted([(font.getGlyphID(glyph), val)
                              for glyph, val in value.items()]))
        # TODO: Also implement format 4.
        # Each build method returns (size, formatID, writeCallback) or
        # None when its format cannot represent this mapping.
        formats = list(sorted(filter(None, [
            self.buildFormat0(writer, font, values),
            self.buildFormat2(writer, font, values),
            self.buildFormat6(writer, font, values),
            self.buildFormat8(writer, font, values),
        ])))
        # We use the format ID as secondary sort key to make the output
        # deterministic when multiple formats have same encoded size.
        dataSize, lookupFormat, writeMethod = formats[0]
        pos = writer.getDataLength()
        writeMethod()
        actualSize = writer.getDataLength() - pos
        # Sanity check: the estimate from buildFormatN must match what
        # writeFormatN actually emitted.
        assert actualSize == dataSize, (
            "AATLookup format %d claimed to write %d bytes, but wrote %d" %
            (lookupFormat, dataSize, actualSize))

    @staticmethod
    def writeBinSearchHeader(writer, numUnits, unitSize):
        # The binary-search header that precedes formats 2, 4 and 6.
        writer.writeUShort(unitSize)
        writer.writeUShort(numUnits)
        searchRange, entrySelector, rangeShift = \
            getSearchRange(n=numUnits, itemSize=unitSize)
        writer.writeUShort(searchRange)
        writer.writeUShort(entrySelector)
        writer.writeUShort(rangeShift)

    def buildFormat0(self, writer, font, values):
        # Format 0 is a plain array over all glyphs; only usable when
        # every glyph in the font has a value.
        numGlyphs = len(font.getGlyphOrder())
        if len(values) != numGlyphs:
            return None
        valueSize = self.converter.staticSize
        return (2 + numGlyphs * valueSize, 0,
                lambda: self.writeFormat0(writer, font, values))

    def writeFormat0(self, writer, font, values):
        writer.writeUShort(0)
        for glyphID_, value in values:
            self.converter.write(
                writer, font, tableDict=None,
                value=value, repeatIndex=None)

    def buildFormat2(self, writer, font, values):
        # Format 2 encodes runs of consecutive glyph IDs sharing one
        # value as (firstGlyph, lastGlyph, value) segments.
        segStart, segValue = values[0]
        segEnd = segStart
        segments = []
        for glyphID, curValue in values[1:]:
            if glyphID != segEnd + 1 or curValue != segValue:
                segments.append((segStart, segEnd, segValue))
                segStart = segEnd = glyphID
                segValue = curValue
            else:
                segEnd = glyphID
        segments.append((segStart, segEnd, segValue))
        valueSize = self.converter.staticSize
        # The extra unit accounts for the 0xFFFF terminator segment.
        numUnits, unitSize = len(segments) + 1, valueSize + 4
        return (2 + self.BIN_SEARCH_HEADER_SIZE + numUnits * unitSize, 2,
                lambda: self.writeFormat2(writer, font, segments))

    def writeFormat2(self, writer, font, segments):
        writer.writeUShort(2)
        valueSize = self.converter.staticSize
        numUnits, unitSize = len(segments), valueSize + 4
        self.writeBinSearchHeader(writer, numUnits, unitSize)
        for firstGlyph, lastGlyph, value in segments:
            # Segment records store lastGlyph before firstGlyph.
            writer.writeUShort(lastGlyph)
            writer.writeUShort(firstGlyph)
            self.converter.write(
                writer, font, tableDict=None,
                value=value, repeatIndex=None)
        # Terminator segment plus zero-padded value.
        writer.writeUShort(0xFFFF)
        writer.writeUShort(0xFFFF)
        writer.writeData(b'\x00' * valueSize)

    def buildFormat6(self, writer, font, values):
        # Format 6: one (glyphID, value) unit per glyph, plus terminator.
        valueSize = self.converter.staticSize
        numUnits, unitSize = len(values), valueSize + 2
        return (2 + self.BIN_SEARCH_HEADER_SIZE + (numUnits + 1) * unitSize, 6,
                lambda: self.writeFormat6(writer, font, values))

    def writeFormat6(self, writer, font, values):
        writer.writeUShort(6)
        valueSize = self.converter.staticSize
        numUnits, unitSize = len(values), valueSize + 2
        self.writeBinSearchHeader(writer, numUnits, unitSize)
        for glyphID, value in values:
            writer.writeUShort(glyphID)
            self.converter.write(
                writer, font, tableDict=None,
                value=value, repeatIndex=None)
        # Terminator unit plus zero-padded value.
        writer.writeUShort(0xFFFF)
        writer.writeData(b'\x00' * valueSize)

    def buildFormat8(self, writer, font, values):
        # Format 8 is a dense array over a contiguous glyph ID range;
        # only usable when there are no gaps.
        minGlyphID, maxGlyphID = values[0][0], values[-1][0]
        if len(values) != maxGlyphID - minGlyphID + 1:
            return None
        valueSize = self.converter.staticSize
        return (6 + len(values) * valueSize, 8,
                lambda: self.writeFormat8(writer, font, values))

    def writeFormat8(self, writer, font, values):
        firstGlyphID = values[0][0]
        writer.writeUShort(8)
        writer.writeUShort(firstGlyphID)
        writer.writeUShort(len(values))
        for _, value in values:
            self.converter.write(
                writer, font, tableDict=None,
                value=value, repeatIndex=None)

    def readFormat0(self, reader, font):
        # One value per glyph in the font, in glyph-order.
        numGlyphs = len(font.getGlyphOrder())
        data = self.converter.readArray(
            reader, font, tableDict=None, count=numGlyphs)
        return {font.getGlyphName(k): value
                for k, value in enumerate(data)}

    def readFormat2(self, reader, font):
        mapping = {}
        pos = reader.pos - 2  # start of table is at UShort for format
        unitSize, numUnits = reader.readUShort(), reader.readUShort()
        assert unitSize >= 4 + self.converter.staticSize, unitSize
        for i in range(numUnits):
            # 12 = 2 (format) + 10 (binary-search header).
            reader.seek(pos + i * unitSize + 12)
            last = reader.readUShort()
            first = reader.readUShort()
            value = self.converter.read(reader, font, tableDict=None)
            if last != 0xFFFF:  # skip the terminator segment
                for k in range(first, last + 1):
                    mapping[font.getGlyphName(k)] = value
        return mapping

    def readFormat4(self, reader, font):
        mapping = {}
        pos = reader.pos - 2  # start of table is at UShort for format
        unitSize = reader.readUShort()
        assert unitSize >= 6, unitSize
        for i in range(reader.readUShort()):
            # 12 = 2 (format) + 10 (binary-search header).
            reader.seek(pos + i * unitSize + 12)
            last = reader.readUShort()
            first = reader.readUShort()
            offset = reader.readUShort()
            if last != 0xFFFF:  # skip the terminator segment
                dataReader = reader.getSubReader(0)  # relative to current position
                dataReader.seek(pos + offset)  # relative to start of table
                data = self.converter.readArray(
                    dataReader, font, tableDict=None,
                    count=last - first + 1)
                for k, v in enumerate(data):
                    mapping[font.getGlyphName(first + k)] = v
        return mapping

    def readFormat6(self, reader, font):
        mapping = {}
        pos = reader.pos - 2  # start of table is at UShort for format
        unitSize = reader.readUShort()
        assert unitSize >= 2 + self.converter.staticSize, unitSize
        for i in range(reader.readUShort()):
            # 12 = 2 (format) + 10 (binary-search header).
            reader.seek(pos + i * unitSize + 12)
            glyphID = reader.readUShort()
            value = self.converter.read(
                reader, font, tableDict=None)
            if glyphID != 0xFFFF:  # skip the terminator unit
                mapping[font.getGlyphName(glyphID)] = value
        return mapping

    def readFormat8(self, reader, font):
        # Dense array starting at glyph ID `first`.
        first = reader.readUShort()
        count = reader.readUShort()
        data = self.converter.readArray(
            reader, font, tableDict=None, count=count)
        return {font.getGlyphName(first + k): value
                for (k, value) in enumerate(data)}

    def xmlRead(self, attrs, content, font):
        value = {}
        for element in content:
            if isinstance(element, tuple):
                name, a, eltContent = element
                if name == "Lookup":
                    value[a["glyph"]] = self.converter.xmlRead(a, eltContent, font)
        return value

    def xmlWrite(self, xmlWriter, font, value, name, attrs):
        xmlWriter.begintag(name, attrs)
        xmlWriter.newline()
        # NOTE: the loop variable deliberately shadows the `value` dict
        # parameter; the dict is only needed for .items() above.
        for glyph, value in sorted(value.items()):
            self.converter.xmlWrite(
                xmlWriter, font, value=value,
                name="Lookup", attrs=[("glyph", glyph)])
        xmlWriter.endtag(name)
        xmlWriter.newline()
# The AAT 'ankr' table has an unusual structure: An offset to an AATLookup
# followed by an offset to a glyph data table. Unusually, the
# offsets in the AATLookup are not relative to the beginning of
# the 'ankr' table, but relative to the glyph data table.
# So, to find the anchor data for a glyph, one needs to add the offset
# to the data table to the offset found in the AATLookup, and then use
# the sum of these two offsets to find the actual data.
class AATLookupWithDataOffset(BaseConverter):
    """Converter for an AATLookup whose values are offsets into a separate
    glyph data table, as used by the AAT 'ankr' table.

    The offsets stored inside the AATLookup are relative to the glyph
    data table rather than to the enclosing table, so the data table's
    own offset must be added before the data can be located.
    """

    def read(self, reader, font, tableDict):
        lookupOffset = reader.readULong()
        dataOffset = reader.readULong()
        lookupReader = reader.getSubReader(lookupOffset)
        lookup = AATLookup('DataOffsets', None, None, UShort)
        offsets = lookup.read(lookupReader, font, tableDict)
        result = {}
        for glyph, offset in offsets.items():
            # Lookup offsets are relative to the data table, hence the
            # added dataOffset term.
            dataReader = reader.getSubReader(offset + dataOffset)
            item = self.tableClass()
            item.decompile(dataReader, font)
            result[glyph] = item
        return result

    def write(self, writer, font, tableDict, value, repeatIndex=None):
        # We do not work with OTTableWriter sub-writers because
        # the offsets in our AATLookup are relative to our data
        # table, for which we need to provide an offset value itself.
        # It might have been possible to somehow make a kludge for
        # performing this indirect offset computation directly inside
        # OTTableWriter. But this would have made the internal logic
        # of OTTableWriter even more complex than it already is,
        # so we decided to roll our own offset computation for the
        # contents of the AATLookup and associated data table.
        offsetByGlyph, offsetByData, dataLen = {}, {}, 0
        compiledData = []
        for glyph in sorted(value, key=font.getGlyphID):
            subWriter = OTTableWriter()
            value[glyph].compile(subWriter, font)
            data = subWriter.getAllData()
            # Identical blobs are shared: reuse the first offset.
            # (Fixed: use an identity test for None, not "== None".)
            offset = offsetByData.get(data)
            if offset is None:
                offset = dataLen
                dataLen = dataLen + len(data)
                offsetByData[data] = offset
                compiledData.append(data)
            offsetByGlyph[glyph] = offset
        # For calculating the offsets to our AATLookup and data table,
        # we can use the regular OTTableWriter infrastructure.
        lookupWriter = writer.getSubWriter(offsetSize=4)
        lookup = AATLookup('DataOffsets', None, None, UShort)
        lookup.write(lookupWriter, font, tableDict, offsetByGlyph, None)
        dataWriter = writer.getSubWriter(offsetSize=4)
        writer.writeSubTable(lookupWriter)
        writer.writeSubTable(dataWriter)
        for d in compiledData:
            dataWriter.writeData(d)

    def xmlRead(self, attrs, content, font):
        lookup = AATLookup('DataOffsets', None, None, self.tableClass)
        return lookup.xmlRead(attrs, content, font)

    def xmlWrite(self, xmlWriter, font, value, name, attrs):
        lookup = AATLookup('DataOffsets', None, None, self.tableClass)
        lookup.xmlWrite(xmlWriter, font, value, name, attrs)
class MorxSubtableConverter(BaseConverter):
    """Converter for one subtable of the AAT 'morx' table.

    The subtable's coverage byte packs text direction (0x80/0x20),
    processing order (0x40/0x10) and a reserved nibble (0x0F); on the
    Python side the reserved nibble is kept in bits 16..19 of
    MorxSubtable.Reserved.
    """

    _PROCESSING_ORDERS = {
        # bits 30 and 28 of morx.CoverageFlags; see morx spec
        (False, False): "LayoutOrder",
        (True, False): "ReversedLayoutOrder",
        (False, True): "LogicalOrder",
        (True, True): "ReversedLogicalOrder",
    }

    # Inverse map, used when compiling and validating XML input.
    _PROCESSING_ORDERS_REVERSED = {
        val: key for key, val in _PROCESSING_ORDERS.items()
    }

    def __init__(self, name, repeat, aux):
        BaseConverter.__init__(self, name, repeat, aux)

    def _setTextDirectionFromCoverageFlags(self, flags, subtable):
        # 0x20 = any direction; 0x80 = vertical; otherwise horizontal.
        if (flags & 0x20) != 0:
            subtable.TextDirection = "Any"
        elif (flags & 0x80) != 0:
            subtable.TextDirection = "Vertical"
        else:
            subtable.TextDirection = "Horizontal"

    def read(self, reader, font, tableDict):
        pos = reader.pos
        m = MorxSubtable()
        m.StructLength = reader.readULong()
        flags = reader.readUInt8()
        # 0x40 selects reversed order; 0x10 selects logical order.
        orderKey = ((flags & 0x40) != 0, (flags & 0x10) != 0)
        m.ProcessingOrder = self._PROCESSING_ORDERS[orderKey]
        self._setTextDirectionFromCoverageFlags(flags, m)
        m.Reserved = reader.readUShort()
        # Keep the coverage byte's reserved nibble in bits 16..19.
        m.Reserved |= (flags & 0xF) << 16
        m.MorphType = reader.readUInt8()
        m.SubFeatureFlags = reader.readULong()
        tableClass = lookupTypes["morx"].get(m.MorphType)
        if tableClass is None:
            assert False, ("unsupported 'morx' lookup type %s" %
                           m.MorphType)
        # To decode AAT ligatures, we need to know the subtable size.
        # The easiest way to pass this along is to create a new reader
        # that works on just the subtable as its data.
        headerLength = reader.pos - pos
        data = reader.data[
            reader.pos
            : reader.pos + m.StructLength - headerLength]
        assert len(data) == m.StructLength - headerLength
        subReader = OTTableReader(data=data, tableTag=reader.tableTag)
        m.SubStruct = tableClass()
        m.SubStruct.decompile(subReader, font)
        # Advance to the end of this subtable regardless of how much
        # the SubStruct decompiler actually consumed.
        reader.seek(pos + m.StructLength)
        return m

    def xmlWrite(self, xmlWriter, font, value, name, attrs):
        xmlWriter.begintag(name, attrs)
        xmlWriter.newline()
        # StructLength and MorphType are derived on compile; emit them
        # as comments for human readers only.
        xmlWriter.comment("StructLength=%d" % value.StructLength)
        xmlWriter.newline()
        xmlWriter.simpletag("TextDirection", value=value.TextDirection)
        xmlWriter.newline()
        xmlWriter.simpletag("ProcessingOrder",
                            value=value.ProcessingOrder)
        xmlWriter.newline()
        if value.Reserved != 0:
            xmlWriter.simpletag("Reserved",
                                value="0x%04x" % value.Reserved)
            xmlWriter.newline()
        xmlWriter.comment("MorphType=%d" % value.MorphType)
        xmlWriter.newline()
        xmlWriter.simpletag("SubFeatureFlags",
                            value="0x%08x" % value.SubFeatureFlags)
        xmlWriter.newline()
        value.SubStruct.toXML(xmlWriter, font)
        xmlWriter.endtag(name)
        xmlWriter.newline()

    def xmlRead(self, attrs, content, font):
        m = MorxSubtable()
        covFlags = 0
        m.Reserved = 0
        for eltName, eltAttrs, eltContent in filter(istuple, content):
            if eltName == "CoverageFlags":
                # Only in XML from old versions of fonttools.
                covFlags = safeEval(eltAttrs["value"])
                orderKey = ((covFlags & 0x40) != 0,
                            (covFlags & 0x10) != 0)
                m.ProcessingOrder = self._PROCESSING_ORDERS[
                    orderKey]
                self._setTextDirectionFromCoverageFlags(
                    covFlags, m)
            elif eltName == "ProcessingOrder":
                m.ProcessingOrder = eltAttrs["value"]
                assert m.ProcessingOrder in self._PROCESSING_ORDERS_REVERSED, "unknown ProcessingOrder: %s" % m.ProcessingOrder
            elif eltName == "TextDirection":
                m.TextDirection = eltAttrs["value"]
                assert m.TextDirection in {"Horizontal", "Vertical", "Any"}, "unknown TextDirection %s" % m.TextDirection
            elif eltName == "Reserved":
                m.Reserved = safeEval(eltAttrs["value"])
            elif eltName.endswith("Morph"):
                # Delegate the actual morph subtable to MorxSubtable.
                m.fromXML(eltName, eltAttrs, eltContent, font)
            else:
                assert False, eltName
        # Merge the legacy coverage nibble into bits 16..19 of Reserved.
        m.Reserved = (covFlags & 0xF) << 16 | m.Reserved
        return m

    def write(self, writer, font, tableDict, value, repeatIndex=None):
        covFlags = (value.Reserved & 0x000F0000) >> 16
        reverseOrder, logicalOrder = self._PROCESSING_ORDERS_REVERSED[
            value.ProcessingOrder]
        covFlags |= 0x80 if value.TextDirection == "Vertical" else 0
        covFlags |= 0x40 if reverseOrder else 0
        covFlags |= 0x20 if value.TextDirection == "Any" else 0
        covFlags |= 0x10 if logicalOrder else 0
        value.CoverageFlags = covFlags
        # Write a recognizable placeholder for StructLength and patch it
        # after compiling, once the actual length is known.
        lengthIndex = len(writer.items)
        before = writer.getDataLength()
        value.StructLength = 0xdeadbeef
        # The high nibble of value.Reserved is actually encoded
        # into coverageFlags, so we need to clear it here.
        origReserved = value.Reserved  # including high nibble
        value.Reserved = value.Reserved & 0xFFFF  # without high nibble
        value.compile(writer, font)
        value.Reserved = origReserved  # restore original value
        assert writer.items[lengthIndex] == b"\xde\xad\xbe\xef"
        length = writer.getDataLength() - before
        writer.items[lengthIndex] = struct.pack(">L", length)
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6Tables.html#ExtendedStateHeader
# TODO: Untangle the implementation of the various lookup-specific formats.
class STXHeader(BaseConverter):
    """Converter for an AAT extended state table (STX) header.

    Handles the glyph class lookup, the state array, the entry table,
    and — depending on the action class — per-glyph lookups (contextual
    morphing) or ligature action/component/ligature lists.
    """

    def __init__(self, name, repeat, aux, tableClass):
        BaseConverter.__init__(self, name, repeat, aux, tableClass)
        assert issubclass(self.tableClass, AATAction)
        self.classLookup = AATLookup("GlyphClasses", None, None, UShort)
        # Only contextual morphing uses per-glyph substitution lookups.
        if issubclass(self.tableClass, ContextualMorphAction):
            self.perGlyphLookup = AATLookup("PerGlyphLookup",
                                            None, None, GlyphID)
        else:
            self.perGlyphLookup = None

    def read(self, reader, font, tableDict):
        table = AATStateTable()
        pos = reader.pos
        # All header offsets are relative to the start of this table
        # (pos); each sub-block gets its own positioned reader.
        classTableReader = reader.getSubReader(0)
        stateArrayReader = reader.getSubReader(0)
        entryTableReader = reader.getSubReader(0)
        actionReader = None
        ligaturesReader = None
        table.GlyphClassCount = reader.readULong()
        classTableReader.seek(pos + reader.readULong())
        stateArrayReader.seek(pos + reader.readULong())
        entryTableReader.seek(pos + reader.readULong())
        if self.perGlyphLookup is not None:
            perGlyphTableReader = reader.getSubReader(0)
            perGlyphTableReader.seek(pos + reader.readULong())
        if issubclass(self.tableClass, LigatureMorphAction):
            actionReader = reader.getSubReader(0)
            actionReader.seek(pos + reader.readULong())
            ligComponentReader = reader.getSubReader(0)
            ligComponentReader.seek(pos + reader.readULong())
            ligaturesReader = reader.getSubReader(0)
            ligaturesReader.seek(pos + reader.readULong())
            # The component list ends where the ligature list begins;
            # each component is one uint16.
            numLigComponents = (ligaturesReader.pos
                                - ligComponentReader.pos) // 2
            assert numLigComponents >= 0
            table.LigComponents = \
                ligComponentReader.readUShortArray(numLigComponents)
            table.Ligatures = self._readLigatures(ligaturesReader, font)
        elif issubclass(self.tableClass, InsertionMorphAction):
            actionReader = reader.getSubReader(0)
            actionReader.seek(pos + reader.readULong())
        table.GlyphClasses = self.classLookup.read(classTableReader,
                                                   font, tableDict)
        # Each state row holds GlyphClassCount uint16 entry indices, so
        # the row count follows from the state array's byte length.
        numStates = int((entryTableReader.pos - stateArrayReader.pos)
                        / (table.GlyphClassCount * 2))
        for stateIndex in range(numStates):
            state = AATState()
            table.States.append(state)
            for glyphClass in range(table.GlyphClassCount):
                entryIndex = stateArrayReader.readUShort()
                state.Transitions[glyphClass] = \
                    self._readTransition(entryTableReader,
                                         entryIndex, font,
                                         actionReader)
        if self.perGlyphLookup is not None:
            table.PerGlyphLookups = self._readPerGlyphLookups(
                table, perGlyphTableReader, font)
        return table

    def _readTransition(self, reader, entryIndex, font, actionReader):
        # Decode the entry at `entryIndex`; entries are fixed-size
        # (transition.staticSize) records in the entry table.
        transition = self.tableClass()
        entryReader = reader.getSubReader(
            reader.pos + entryIndex * transition.staticSize)
        transition.decompile(entryReader, font, actionReader)
        return transition

    def _readLigatures(self, reader, font):
        # The ligature list runs to the end of the data; one uint16
        # glyph ID per ligature.
        limit = len(reader.data)
        numLigatureGlyphs = (limit - reader.pos) // 2
        return [font.getGlyphName(g)
                for g in reader.readUShortArray(numLigatureGlyphs)]

    def _countPerGlyphLookups(self, table):
        # Somewhat annoyingly, the morx table does not encode
        # the size of the per-glyph table. So we need to find
        # the maximum value that MorphActions use as index
        # into this table.
        numLookups = 0
        for state in table.States:
            for t in state.Transitions.values():
                if isinstance(t, ContextualMorphAction):
                    if t.MarkIndex != 0xFFFF:
                        numLookups = max(
                            numLookups,
                            t.MarkIndex + 1)
                    if t.CurrentIndex != 0xFFFF:
                        numLookups = max(
                            numLookups,
                            t.CurrentIndex + 1)
        return numLookups

    def _readPerGlyphLookups(self, table, reader, font):
        # The per-glyph table is a list of 32-bit offsets (relative to
        # its own start) to AATLookup tables.
        pos = reader.pos
        lookups = []
        for _ in range(self._countPerGlyphLookups(table)):
            lookupReader = reader.getSubReader(0)
            lookupReader.seek(pos + reader.readULong())
            lookups.append(
                self.perGlyphLookup.read(lookupReader, font, {}))
        return lookups

    def write(self, writer, font, tableDict, value, repeatIndex=None):
        glyphClassWriter = OTTableWriter()
        self.classLookup.write(glyphClassWriter, font, tableDict,
                               value.GlyphClasses, repeatIndex=None)
        glyphClassData = pad(glyphClassWriter.getAllData(), 2)
        glyphClassCount = max(value.GlyphClasses.values()) + 1
        # Header is four ULongs (16 bytes); one more ULong when a
        # per-glyph table offset is present, plus any action header.
        glyphClassTableOffset = 16  # size of STXHeader
        if self.perGlyphLookup is not None:
            glyphClassTableOffset += 4
        glyphClassTableOffset += self.tableClass.actionHeaderSize
        actionData, actionIndex = \
            self.tableClass.compileActions(font, value.States)
        stateArrayData, entryTableData = self._compileStates(
            font, value.States, glyphClassCount, actionIndex)
        # Sub-blocks are laid out back to back; offsets accumulate.
        stateArrayOffset = glyphClassTableOffset + len(glyphClassData)
        entryTableOffset = stateArrayOffset + len(stateArrayData)
        perGlyphOffset = entryTableOffset + len(entryTableData)
        perGlyphData = \
            pad(self._compilePerGlyphLookups(value, font), 4)
        if actionData is not None:
            # NOTE(review): action data and per-glyph lookups appear to
            # be mutually exclusive (see the assert below), so both use
            # the slot right after the entry table.
            actionOffset = entryTableOffset + len(entryTableData)
        else:
            actionOffset = None
        ligaturesOffset, ligComponentsOffset = None, None
        ligComponentsData = self._compileLigComponents(value, font)
        ligaturesData = self._compileLigatures(value, font)
        if ligComponentsData is not None:
            assert len(perGlyphData) == 0
            ligComponentsOffset = actionOffset + len(actionData)
            ligaturesOffset = ligComponentsOffset + len(ligComponentsData)
        writer.writeULong(glyphClassCount)
        writer.writeULong(glyphClassTableOffset)
        writer.writeULong(stateArrayOffset)
        writer.writeULong(entryTableOffset)
        if self.perGlyphLookup is not None:
            writer.writeULong(perGlyphOffset)
        if actionOffset is not None:
            writer.writeULong(actionOffset)
        if ligComponentsOffset is not None:
            writer.writeULong(ligComponentsOffset)
            writer.writeULong(ligaturesOffset)
        writer.writeData(glyphClassData)
        writer.writeData(stateArrayData)
        writer.writeData(entryTableData)
        writer.writeData(perGlyphData)
        if actionData is not None:
            writer.writeData(actionData)
        if ligComponentsData is not None:
            writer.writeData(ligComponentsData)
        if ligaturesData is not None:
            writer.writeData(ligaturesData)

    def _compileStates(self, font, states, glyphClassCount, actionIndex):
        stateArrayWriter = OTTableWriter()
        entries, entryIDs = [], {}
        for state in states:
            for glyphClass in range(glyphClassCount):
                transition = state.Transitions[glyphClass]
                entryWriter = OTTableWriter()
                transition.compile(entryWriter, font,
                                   actionIndex)
                entryData = entryWriter.getAllData()
                assert len(entryData) == transition.staticSize, ( \
                    "%s has staticSize %d, "
                    "but actually wrote %d bytes" % (
                        repr(transition),
                        transition.staticSize,
                        len(entryData)))
                # Identical entries are shared; the state array stores
                # the index of the (deduplicated) entry record.
                entryIndex = entryIDs.get(entryData)
                if entryIndex is None:
                    entryIndex = len(entries)
                    entryIDs[entryData] = entryIndex
                    entries.append(entryData)
                stateArrayWriter.writeUShort(entryIndex)
        stateArrayData = pad(stateArrayWriter.getAllData(), 4)
        entryTableData = pad(bytesjoin(entries), 4)
        return stateArrayData, entryTableData

    def _compilePerGlyphLookups(self, table, font):
        if self.perGlyphLookup is None:
            return b""
        numLookups = self._countPerGlyphLookups(table)
        assert len(table.PerGlyphLookups) == numLookups, (
            "len(AATStateTable.PerGlyphLookups) is %d, "
            "but the actions inside the table refer to %d" %
            (len(table.PerGlyphLookups), numLookups))
        writer = OTTableWriter()
        for lookup in table.PerGlyphLookups:
            lookupWriter = writer.getSubWriter(offsetSize=4)
            self.perGlyphLookup.write(lookupWriter, font,
                                      {}, lookup, None)
            writer.writeSubTable(lookupWriter)
        return writer.getAllData()

    def _compileLigComponents(self, table, font):
        if not hasattr(table, "LigComponents"):
            return None
        writer = OTTableWriter()
        for component in table.LigComponents:
            writer.writeUShort(component)
        return writer.getAllData()

    def _compileLigatures(self, table, font):
        if not hasattr(table, "Ligatures"):
            return None
        writer = OTTableWriter()
        for glyphName in table.Ligatures:
            writer.writeUShort(font.getGlyphID(glyphName))
        return writer.getAllData()

    def xmlWrite(self, xmlWriter, font, value, name, attrs):
        xmlWriter.begintag(name, attrs)
        xmlWriter.newline()
        # GlyphClassCount is derived on compile; emit as comment only.
        xmlWriter.comment("GlyphClassCount=%s" % value.GlyphClassCount)
        xmlWriter.newline()
        for g, klass in sorted(value.GlyphClasses.items()):
            xmlWriter.simpletag("GlyphClass", glyph=g, value=klass)
            xmlWriter.newline()
        for stateIndex, state in enumerate(value.States):
            xmlWriter.begintag("State", index=stateIndex)
            xmlWriter.newline()
            for glyphClass, trans in sorted(state.Transitions.items()):
                trans.toXML(xmlWriter, font=font,
                            attrs={"onGlyphClass": glyphClass},
                            name="Transition")
            xmlWriter.endtag("State")
            xmlWriter.newline()
        for i, lookup in enumerate(value.PerGlyphLookups):
            xmlWriter.begintag("PerGlyphLookup", index=i)
            xmlWriter.newline()
            for glyph, val in sorted(lookup.items()):
                xmlWriter.simpletag("Lookup", glyph=glyph,
                                    value=val)
                xmlWriter.newline()
            xmlWriter.endtag("PerGlyphLookup")
            xmlWriter.newline()
        if hasattr(value, "LigComponents"):
            xmlWriter.begintag("LigComponents")
            xmlWriter.newline()
            for i, val in enumerate(getattr(value, "LigComponents")):
                xmlWriter.simpletag("LigComponent", index=i,
                                    value=val)
                xmlWriter.newline()
            xmlWriter.endtag("LigComponents")
            xmlWriter.newline()
        self._xmlWriteLigatures(xmlWriter, font, value, name, attrs)
        xmlWriter.endtag(name)
        xmlWriter.newline()

    def _xmlWriteLigatures(self, xmlWriter, font, value, name, attrs):
        if not hasattr(value, "Ligatures"):
            return
        xmlWriter.begintag("Ligatures")
        xmlWriter.newline()
        for i, g in enumerate(getattr(value, "Ligatures")):
            xmlWriter.simpletag("Ligature", index=i, glyph=g)
            xmlWriter.newline()
        xmlWriter.endtag("Ligatures")
        xmlWriter.newline()

    def xmlRead(self, attrs, content, font):
        table = AATStateTable()
        for eltName, eltAttrs, eltContent in filter(istuple, content):
            if eltName == "GlyphClass":
                glyph = eltAttrs["glyph"]
                value = eltAttrs["value"]
                table.GlyphClasses[glyph] = safeEval(value)
            elif eltName == "State":
                state = self._xmlReadState(eltAttrs, eltContent, font)
                table.States.append(state)
            elif eltName == "PerGlyphLookup":
                lookup = self.perGlyphLookup.xmlRead(
                    eltAttrs, eltContent, font)
                table.PerGlyphLookups.append(lookup)
            elif eltName == "LigComponents":
                table.LigComponents = \
                    self._xmlReadLigComponents(
                        eltAttrs, eltContent, font)
            elif eltName == "Ligatures":
                table.Ligatures = \
                    self._xmlReadLigatures(
                        eltAttrs, eltContent, font)
        # Recompute the class count rather than trusting the XML.
        table.GlyphClassCount = max(table.GlyphClasses.values()) + 1
        return table

    def _xmlReadState(self, attrs, content, font):
        state = AATState()
        for eltName, eltAttrs, eltContent in filter(istuple, content):
            if eltName == "Transition":
                glyphClass = safeEval(eltAttrs["onGlyphClass"])
                transition = self.tableClass()
                transition.fromXML(eltName, eltAttrs,
                                   eltContent, font)
                state.Transitions[glyphClass] = transition
        return state

    def _xmlReadLigComponents(self, attrs, content, font):
        ligComponents = []
        for eltName, eltAttrs, _eltContent in filter(istuple, content):
            if eltName == "LigComponent":
                ligComponents.append(
                    safeEval(eltAttrs["value"]))
        return ligComponents

    def _xmlReadLigatures(self, attrs, content, font):
        ligs = []
        for eltName, eltAttrs, _eltContent in filter(istuple, content):
            if eltName == "Ligature":
                ligs.append(eltAttrs["glyph"])
        return ligs
class CIDGlyphMap(BaseConverter):
    """Converter mapping CIDs to glyph names; 0xFFFF entries are absent."""

    def read(self, reader, font, tableDict):
        numCIDs = reader.readUShort()
        gids = reader.readUShortArray(numCIDs)
        # 0xFFFF is the "no glyph for this CID" sentinel; skip those.
        return {cid: font.getGlyphName(gid)
                for cid, gid in enumerate(gids)
                if gid != 0xFFFF}

    def write(self, writer, font, tableDict, value, repeatIndex=None):
        items = {cid: font.getGlyphID(glyph)
                 for cid, glyph in value.items()}
        if items:
            count = max(items) + 1
        else:
            count = 0
        writer.writeUShort(count)
        # Emit a dense array; gaps get the 0xFFFF sentinel.
        for cid in range(count):
            writer.writeUShort(items.get(cid, 0xFFFF))

    def xmlRead(self, attrs, content, font):
        mapping = {}
        for eName, eAttrs, _eContent in filter(istuple, content):
            if eName != "CID":
                continue
            mapping[safeEval(eAttrs["cid"])] = eAttrs["glyph"].strip()
        return mapping

    def xmlWrite(self, xmlWriter, font, value, name, attrs):
        xmlWriter.begintag(name, attrs)
        xmlWriter.newline()
        for cid, glyph in sorted(value.items()):
            # Skip unset entries (None or the 0xFFFF sentinel).
            if glyph is not None and glyph != 0xFFFF:
                xmlWriter.simpletag("CID", cid=cid, glyph=glyph)
                xmlWriter.newline()
        xmlWriter.endtag(name)
        xmlWriter.newline()
class GlyphCIDMap(BaseConverter):
    """Converter mapping glyph names to CIDs; 0xFFFF entries are absent."""

    def read(self, reader, font, tableDict):
        glyphOrder = font.getGlyphOrder()
        count = reader.readUShort()
        cids = reader.readUShortArray(count)
        if count > len(glyphOrder):
            log.warning("GlyphCIDMap has %d elements, "
                        "but the font has only %d glyphs; "
                        "ignoring the rest" %
                        (count, len(glyphOrder)))
        # Only glyph IDs present in both arrays; 0xFFFF means "no CID".
        limit = min(len(cids), len(glyphOrder))
        return {glyphOrder[gid]: cids[gid]
                for gid in range(limit)
                if cids[gid] != 0xFFFF}

    def write(self, writer, font, tableDict, value, repeatIndex=None):
        items = {font.getGlyphID(g): cid
                 for g, cid in value.items()
                 if cid is not None and cid != 0xFFFF}
        if items:
            count = max(items) + 1
        else:
            count = 0
        writer.writeUShort(count)
        # Dense array indexed by glyph ID; gaps get the 0xFFFF sentinel.
        for glyphID in range(count):
            writer.writeUShort(items.get(glyphID, 0xFFFF))

    def xmlRead(self, attrs, content, font):
        mapping = {}
        for eName, eAttrs, _eContent in filter(istuple, content):
            if eName != "CID":
                continue
            mapping[eAttrs["glyph"]] = safeEval(eAttrs["value"])
        return mapping

    def xmlWrite(self, xmlWriter, font, value, name, attrs):
        xmlWriter.begintag(name, attrs)
        xmlWriter.newline()
        for glyph, cid in sorted(value.items()):
            # Skip unset entries (None or the 0xFFFF sentinel).
            if cid is not None and cid != 0xFFFF:
                xmlWriter.simpletag("CID", glyph=glyph, value=cid)
                xmlWriter.newline()
        xmlWriter.endtag(name)
        xmlWriter.newline()
class DeltaValue(BaseConverter):
    """Converter for the packed DeltaValue array of a Device table.

    DeltaFormat 1/2/3 packs signed values of 2/4/8 bits each
    (nBits = 1 << DeltaFormat), consumed MSB-first from a sequence of
    big-endian uint16 words.
    """

    def read(self, reader, font, tableDict):
        StartSize = tableDict["StartSize"]
        EndSize = tableDict["EndSize"]
        DeltaFormat = tableDict["DeltaFormat"]
        assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat"
        nItems = EndSize - StartSize + 1
        nBits = 1 << DeltaFormat
        minusOffset = 1 << nBits  # subtracted to sign-extend negatives
        mask = (1 << nBits) - 1
        signMask = 1 << (nBits - 1)
        DeltaValue = []
        tmp, shift = 0, 0
        for i in range(nItems):
            if shift == 0:
                # Refill the bit buffer from the next word.
                tmp, shift = reader.readUShort(), 16
            shift = shift - nBits
            value = (tmp >> shift) & mask
            if value & signMask:
                # Two's-complement sign extension.
                value = value - minusOffset
            DeltaValue.append(value)
        return DeltaValue

    def write(self, writer, font, tableDict, value, repeatIndex=None):
        StartSize = tableDict["StartSize"]
        EndSize = tableDict["EndSize"]
        DeltaFormat = tableDict["DeltaFormat"]
        DeltaValue = value
        assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat"
        nItems = EndSize - StartSize + 1
        nBits = 1 << DeltaFormat
        assert len(DeltaValue) == nItems
        mask = (1 << nBits) - 1
        tmp, shift = 0, 16
        for value in DeltaValue:
            # Pack values MSB-first into the current word.
            shift = shift - nBits
            tmp = tmp | ((value & mask) << shift)
            if shift == 0:
                writer.writeUShort(tmp)
                tmp, shift = 0, 16
        if shift != 16:
            # Flush the final, partially-filled word.
            writer.writeUShort(tmp)

    def xmlWrite(self, xmlWriter, font, value, name, attrs):
        xmlWriter.simpletag(name, attrs + [("value", value)])
        xmlWriter.newline()

    def xmlRead(self, attrs, content, font):
        return safeEval(attrs["value"])
class VarIdxMapValue(BaseConverter):
    """Converter for the mapping array of a variation index map.

    EntryFormat encodes the entry layout: the low nibble gives the
    inner-index bit width minus one; bits 4-5 give the entry byte size
    minus one. Decoded values are packed as (outer << 16) | inner.
    """

    def read(self, reader, font, tableDict):
        fmt = tableDict['EntryFormat']
        nItems = tableDict['MappingCount']
        innerBits = 1 + (fmt & 0x000F)
        innerMask = (1 << innerBits) - 1
        outerMask = 0xFFFFFFFF - innerMask
        outerShift = 16 - innerBits
        entrySize = 1 + ((fmt & 0x0030) >> 4)
        # Pick the reader for the declared entry width (1-4 bytes).
        read = {
            1: reader.readUInt8,
            2: reader.readUShort,
            3: reader.readUInt24,
            4: reader.readULong,
        }[entrySize]
        mapping = []
        for i in range(nItems):
            raw = read()
            # Move the outer index from just above the inner bits up to
            # bit 16, keeping the inner index in the low bits.
            idx = ((raw & outerMask) << outerShift) | (raw & innerMask)
            mapping.append(idx)
        return mapping

    def write(self, writer, font, tableDict, value, repeatIndex=None):
        fmt = tableDict['EntryFormat']
        mapping = value
        writer['MappingCount'].setValue(len(mapping))
        innerBits = 1 + (fmt & 0x000F)
        innerMask = (1 << innerBits) - 1
        outerShift = 16 - innerBits
        entrySize = 1 + ((fmt & 0x0030) >> 4)
        # Pick the writer for the declared entry width (1-4 bytes).
        write = {
            1: writer.writeUInt8,
            2: writer.writeUShort,
            3: writer.writeUInt24,
            4: writer.writeULong,
        }[entrySize]
        for idx in mapping:
            # Inverse of read(): squeeze the outer index down so it sits
            # directly above the inner-index bits.
            raw = ((idx & 0xFFFF0000) >> outerShift) | (idx & innerMask)
            write(raw)
class VarDataValue(BaseConverter):
    """Converter for one delta-set row of an item variation data table.

    The first min(regionCount, shortCount) deltas are int16, the rest of
    the row's regions are int8; any declared shorts beyond the region
    count are padding and are skipped/zero-filled.
    """

    def read(self, reader, font, tableDict):
        regionCount = tableDict["VarRegionCount"]
        shortCount = tableDict["NumShorts"]
        wordCount = min(regionCount, shortCount)
        deltas = [reader.readShort() for _ in range(wordCount)]
        deltas.extend(reader.readInt8()
                      for _ in range(wordCount, regionCount))
        # Consume (and discard) padding bytes past the region count.
        for _ in range(regionCount, shortCount):
            reader.readInt8()
        return deltas

    def write(self, writer, font, tableDict, value, repeatIndex=None):
        regionCount = tableDict["VarRegionCount"]
        shortCount = tableDict["NumShorts"]
        wordCount = min(regionCount, shortCount)
        for i in range(wordCount):
            writer.writeShort(value[i])
        for i in range(wordCount, regionCount):
            writer.writeInt8(value[i])
        # Zero-fill padding bytes past the region count.
        for _ in range(regionCount, shortCount):
            writer.writeInt8(0)

    def xmlWrite(self, xmlWriter, font, value, name, attrs):
        xmlWriter.simpletag(name, attrs + [("value", value)])
        xmlWriter.newline()

    def xmlRead(self, attrs, content, font):
        return safeEval(attrs["value"])
class LookupFlag(UShort):
    """UShort converter that annotates known LookupFlag bits with an XML
    comment for human readers."""

    # (bit, label) pairs, in the order they appear in the comment.
    _FLAG_LABELS = (
        (0x01, "rightToLeft"),
        (0x02, "ignoreBaseGlyphs"),
        (0x04, "ignoreLigatures"),
        (0x08, "ignoreMarks"),
        (0x10, "useMarkFilteringSet"),
    )

    def xmlWrite(self, xmlWriter, font, value, name, attrs):
        xmlWriter.simpletag(name, attrs + [("value", value)])
        flags = [label for bit, label in self._FLAG_LABELS if value & bit]
        # The high byte carries the mark attachment class, if any.
        if value & 0xff00:
            flags.append("markAttachmentType[%i]" % (value >> 8))
        if flags:
            xmlWriter.comment(" ".join(flags))
        xmlWriter.newline()
def _issubclass_namedtuple(x):
return (
issubclass(x, tuple)
and getattr(x, "_fields", None) is not None
)
class _NamedTupleConverter(BaseConverter):
    """Base converter that reads/writes a fixed sequence of SimpleValue
    fields as one namedtuple instance (used by the Var* converters below)."""
    # subclasses must override this
    tupleClass = NotImplemented
    # List[SimpleValue]
    converterClasses = NotImplemented

    def __init__(self, name, repeat, aux, tableClass=None):
        # we expect all converters to be subclasses of SimpleValue
        assert all(issubclass(klass, SimpleValue) for klass in self.converterClasses)
        assert _issubclass_namedtuple(self.tupleClass), repr(self.tupleClass)
        assert len(self.tupleClass._fields) == len(self.converterClasses)
        assert tableClass is None  # tableClass is unused by SimpleValues
        BaseConverter.__init__(self, name, repeat, aux)
        # One sub-converter per namedtuple field, in field order.
        self.converters = [
            klass(name=name, repeat=None, aux=None)
            for name, klass in zip(self.tupleClass._fields, self.converterClasses)
        ]
        self.convertersByName = {conv.name: conv for conv in self.converters}
        # returned by getRecordSize method
        self.staticSize = sum(c.staticSize for c in self.converters)

    def read(self, reader, font, tableDict):
        # Read each field in declaration order, then build the namedtuple.
        kwargs = {
            conv.name: conv.read(reader, font, tableDict)
            for conv in self.converters
        }
        return self.tupleClass(**kwargs)

    def write(self, writer, font, tableDict, value, repeatIndex=None):
        for conv in self.converters:
            v = getattr(value, conv.name)
            # repeatIndex is unused for SimpleValues
            conv.write(writer, font, tableDict, v, repeatIndex=None)

    def xmlWrite(self, xmlWriter, font, value, name, attrs):
        assert value is not None
        # Fields that still hold their declared default are omitted from the
        # XML attributes; fields without a default are always emitted.
        defaults = value.__new__.__defaults__ or ()
        assert len(self.converters) >= len(defaults)
        values = {}
        required = object()  # sentinel: field has no default value
        # Defaults apply to the *trailing* namedtuple fields, hence the
        # reversed() pairing with zip_longest.
        for conv, default in zip_longest(
            reversed(self.converters),
            reversed(defaults),
            fillvalue=required,
        ):
            v = getattr(value, conv.name)
            if default is required or v != default:
                values[conv.name] = conv.toString(v)
        if attrs is None:
            attrs = []
        attrs.extend(
            (conv.name, values[conv.name])
            for conv in self.converters
            if conv.name in values
        )
        xmlWriter.simpletag(name, attrs)
        xmlWriter.newline()

    def xmlRead(self, attrs, content, font):
        converters = self.convertersByName
        kwargs = {
            k: converters[k].fromString(v)
            for k, v in attrs.items()
        }
        return self.tupleClass(**kwargs)
class VarFixed(_NamedTupleConverter):
    # (Fixed value, ULong variation index) pair held in a VariableFloat.
    tupleClass = VariableFloat
    converterClasses = [Fixed, ULong]
class VarF2Dot14(_NamedTupleConverter):
    # (F2Dot14 value, ULong variation index) pair held in a VariableFloat.
    tupleClass = VariableFloat
    converterClasses = [F2Dot14, ULong]
class VarInt16(_NamedTupleConverter):
    # (int16 value, ULong variation index) pair held in a VariableInt.
    tupleClass = VariableInt
    converterClasses = [Short, ULong]
class VarUInt16(_NamedTupleConverter):
    # (uint16 value, ULong variation index) pair held in a VariableInt.
    tupleClass = VariableInt
    converterClasses = [UShort, ULong]
class _UInt8Enum(UInt8):
    """UInt8 converter whose decoded value is wrapped in an enum class;
    XML round-trips use the lower-cased member name."""
    enumClass = NotImplemented  # subclasses must override

    def read(self, reader, font, tableDict):
        raw = super().read(reader, font, tableDict)
        return self.enumClass(raw)

    @classmethod
    def fromString(cls, value):
        return getattr(cls.enumClass, value.upper())

    @classmethod
    def toString(cls, value):
        member = cls.enumClass(value)
        return member.name.lower()
class ExtendMode(_UInt8Enum):
    # uint8 converter wrapping the _ExtendMode enum.
    enumClass = _ExtendMode
class CompositeMode(_UInt8Enum):
    # uint8 converter wrapping the _CompositeMode enum.
    enumClass = _CompositeMode
# Maps the type names used in the table-data definitions to their converter
# classes; "template" entries are factories parameterized by a table class.
converterMapping = {
    # type         class
    "int8": Int8,
    "int16": Short,
    "uint8": UInt8,
    "uint16": UShort,
    "uint24": UInt24,
    "uint32": ULong,
    "char64": Char64,
    "Flags32": Flags32,
    "Version": Version,
    "Tag": Tag,
    "GlyphID": GlyphID,
    "GlyphID32": GlyphID32,
    "NameID": NameID,
    "DeciPoints": DeciPoints,
    "Fixed": Fixed,
    "F2Dot14": F2Dot14,
    "struct": Struct,
    "Offset": Table,
    "LOffset": LTable,
    "Offset24": Table24,
    "ValueRecord": ValueRecord,
    "DeltaValue": DeltaValue,
    "VarIdxMapValue": VarIdxMapValue,
    "VarDataValue": VarDataValue,
    "LookupFlag": LookupFlag,
    "ExtendMode": ExtendMode,
    "CompositeMode": CompositeMode,
    "STATFlags": STATFlags,
    # AAT
    "CIDGlyphMap": CIDGlyphMap,
    "GlyphCIDMap": GlyphCIDMap,
    "MortChain": StructWithLength,
    "MortSubtable": StructWithLength,
    "MorxChain": StructWithLength,
    "MorxSubtable": MorxSubtableConverter,
    # "Template" types
    "AATLookup": lambda C: partial(AATLookup, tableClass=C),
    "AATLookupWithDataOffset": lambda C: partial(AATLookupWithDataOffset, tableClass=C),
    "STXHeader": lambda C: partial(STXHeader, tableClass=C),
    "OffsetTo": lambda C: partial(Table, tableClass=C),
    "LOffsetTo": lambda C: partial(LTable, tableClass=C),
    "LOffset24To": lambda C: partial(Table24, tableClass=C),
    # Variable types
    "VarFixed": VarFixed,
    "VarF2Dot14": VarF2Dot14,
    "VarInt16": VarInt16,
    "VarUInt16": VarUInt16,
}
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/otConverters.py",
"copies": "3",
"size": "58471",
"license": "apache-2.0",
"hash": -2829462744219346400,
"line_mean": 31.3580520199,
"line_max": 115,
"alpha_frac": 0.7093430932,
"autogenerated": false,
"ratio": 3.184348110227644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5393691203427644,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import bytesjoin, tostr
from io import BytesIO
import struct
from fontTools.misc import sstruct
from collections import OrderedDict
from collections.abc import MutableMapping
class ResourceError(Exception):
    """Raised when a resource fork cannot be read or parsed."""
    pass
class ResourceReader(MutableMapping):
    """Reader for Mac OS resource forks.

    Parses a resource fork and returns resources according to their type.
    If run on OS X, this will open the resource fork in the filesystem.
    Otherwise, it will open the file itself and attempt to read it as
    though it were a resource fork.

    The returned object can be indexed by type and iterated over,
    returning in each case a list of py:class:`Resource` objects
    representing all the resources of a certain type.
    """

    def __init__(self, fileOrPath):
        """Open a file

        Args:
            fileOrPath: Either an object supporting a ``read`` method, an
                ``os.PathLike`` object, or a string.
        """
        self._resources = OrderedDict()
        if hasattr(fileOrPath, 'read'):
            self.file = fileOrPath
        else:
            try:
                # try reading from the resource fork (only works on OS X)
                self.file = self.openResourceFork(fileOrPath)
                self._readFile()
                return
            except (ResourceError, IOError):
                # if it fails, use the data fork
                self.file = self.openDataFork(fileOrPath)
        self._readFile()

    @staticmethod
    def openResourceFork(path):
        """Return a BytesIO over the file's resource fork (OS X only)."""
        if hasattr(path, "__fspath__"):  # support os.PathLike objects
            path = path.__fspath__()
        with open(path + '/..namedfork/rsrc', 'rb') as resfork:
            data = resfork.read()
        infile = BytesIO(data)
        infile.name = path
        return infile

    @staticmethod
    def openDataFork(path):
        """Return a BytesIO over the file's regular (data fork) contents."""
        with open(path, 'rb') as datafork:
            data = datafork.read()
        infile = BytesIO(data)
        infile.name = path
        return infile

    def _readFile(self):
        # Parse header + resource map, then every type's reference list.
        self._readHeaderAndMap()
        self._readTypeList()

    def _read(self, numBytes, offset=None):
        """Read exactly *numBytes* (optionally after seeking to *offset*);
        raise ResourceError on short reads or bad offsets."""
        if offset is not None:
            try:
                self.file.seek(offset)
            except OverflowError:
                raise ResourceError("Failed to seek offset ('offset' is too large)")
            if self.file.tell() != offset:
                raise ResourceError('Failed to seek offset (reached EOF)')
        try:
            data = self.file.read(numBytes)
        except OverflowError:
            raise ResourceError("Cannot read resource ('numBytes' is too large)")
        if len(data) != numBytes:
            raise ResourceError('Cannot read resource (not enough data)')
        return data

    def _readHeaderAndMap(self):
        # Unpack the fork header, then the map header; remember the absolute
        # offsets of the type list and name list for later lookups.
        self.file.seek(0)
        headerData = self._read(ResourceForkHeaderSize)
        sstruct.unpack(ResourceForkHeader, headerData, self)
        # seek to resource map, skip reserved
        mapOffset = self.mapOffset + 22
        resourceMapData = self._read(ResourceMapHeaderSize, mapOffset)
        sstruct.unpack(ResourceMapHeader, resourceMapData, self)
        self.absTypeListOffset = self.mapOffset + self.typeListOffset
        self.absNameListOffset = self.mapOffset + self.nameListOffset

    def _readTypeList(self):
        # The stored counts are "actual - 1", hence the "+ 1" below.
        absTypeListOffset = self.absTypeListOffset
        numTypesData = self._read(2, absTypeListOffset)
        self.numTypes, = struct.unpack('>H', numTypesData)
        absTypeListOffset2 = absTypeListOffset + 2
        for i in range(self.numTypes + 1):
            resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
            resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
            item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
            resType = tostr(item['type'], encoding='mac-roman')
            refListOffset = absTypeListOffset + item['refListOffset']
            numRes = item['numRes'] + 1
            resources = self._readReferenceList(resType, refListOffset, numRes)
            self._resources[resType] = resources

    def _readReferenceList(self, resType, refListOffset, numRes):
        """Decompile *numRes* Resource objects from the reference list."""
        resources = []
        for i in range(numRes):
            refOffset = refListOffset + ResourceRefItemSize * i
            refData = self._read(ResourceRefItemSize, refOffset)
            res = Resource(resType)
            res.decompile(refData, self)
            resources.append(res)
        return resources

    def __getitem__(self, resType):
        return self._resources[resType]

    def __delitem__(self, resType):
        del self._resources[resType]

    def __setitem__(self, resType, resources):
        self._resources[resType] = resources

    def __len__(self):
        return len(self._resources)

    def __iter__(self):
        return iter(self._resources)

    def keys(self):
        return self._resources.keys()

    @property
    def types(self):
        """A list of the types of resources in the resource fork."""
        return list(self._resources.keys())

    def countResources(self, resType):
        """Return the number of resources of a given type."""
        try:
            return len(self[resType])
        except KeyError:
            return 0

    def getIndices(self, resType):
        """Returns a list of indices of resources of a given type."""
        numRes = self.countResources(resType)
        if numRes:
            return list(range(1, numRes+1))
        else:
            return []

    def getNames(self, resType):
        """Return list of names of all resources of a given type."""
        return [res.name for res in self.get(resType, []) if res.name is not None]

    def getIndResource(self, resType, index):
        """Return resource of given type located at an index ranging from 1
        to the number of resources for that type, or None if not found.
        """
        if index < 1:
            return None
        try:
            res = self[resType][index-1]
        except (KeyError, IndexError):
            return None
        return res

    def getNamedResource(self, resType, name):
        """Return the named resource of given type, else return None."""
        name = tostr(name, encoding='mac-roman')
        for res in self.get(resType, []):
            if res.name == name:
                return res
        return None

    def close(self):
        """Close the underlying file object if it is still open."""
        if not self.file.closed:
            self.file.close()
class Resource(object):
    """Represents a resource stored within a resource fork.

    Attributes:
        type: resource type.
        data: resource data.
        id: ID.
        name: resource name.
        attr: attributes.
    """

    def __init__(self, resType=None, resData=None, resID=None, resName=None,
                 resAttr=None):
        self.type = resType
        self.data = resData
        self.id = resID
        self.name = resName
        self.attr = resAttr

    def decompile(self, refData, reader):
        """Parse one reference-list item, then load this resource's data
        (and, when present, its name) through *reader*."""
        sstruct.unpack(ResourceRefItem, refData, self)
        # interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
        self.dataOffset, = struct.unpack('>L', bytesjoin([b"\0", self.dataOffset]))
        absDataOffset = reader.dataOffset + self.dataOffset
        dataLength, = struct.unpack(">L", reader._read(4, absDataOffset))
        self.data = reader._read(dataLength)
        if self.nameOffset == -1:
            # -1 marks a nameless resource; self.name stays as initialized.
            return
        absNameOffset = reader.absNameListOffset + self.nameOffset
        nameLength, = struct.unpack('B', reader._read(1, absNameOffset))
        name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength))
        self.name = tostr(name, encoding='mac-roman')
# sstruct format descriptions for the on-disk resource fork structures,
# plus their pre-computed sizes.

# Fork-level header: offsets/lengths of the data area and the resource map.
ResourceForkHeader = """
> # big endian
dataOffset: L
mapOffset: L
dataLen: L
mapLen: L
"""
ResourceForkHeaderSize = sstruct.calcsize(ResourceForkHeader)

# Resource map header (after the 22 reserved bytes skipped by the reader).
ResourceMapHeader = """
> # big endian
attr: H
typeListOffset: H
nameListOffset: H
"""
ResourceMapHeaderSize = sstruct.calcsize(ResourceMapHeader)

# One entry per resource type; numRes is stored as (count - 1).
ResourceTypeItem = """
> # big endian
type: 4s
numRes: H
refListOffset: H
"""
ResourceTypeItemSize = sstruct.calcsize(ResourceTypeItem)

# One reference-list entry per resource; dataOffset is a 3-byte field.
ResourceRefItem = """
> # big endian
id: h
nameOffset: h
attr: B
dataOffset: 3s
reserved: L
"""
ResourceRefItemSize = sstruct.calcsize(ResourceRefItem)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/misc/macRes.py",
"copies": "5",
"size": "7408",
"license": "apache-2.0",
"hash": -6409355885275127000,
"line_mean": 27.6023166023,
"line_max": 77,
"alpha_frac": 0.7020788337,
"autogenerated": false,
"ratio": 3.295373665480427,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6497452499180427,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import *
from AppKit import *
import re
import weakref
from vanilla import *
from vanilla.dialogs import getFile
from defconAppKit.windows.baseWindow import BaseWindowController
from mojo.roboFont import version as RoboFontVersion
from mojo.events import addObserver, removeObserver
from mojo.drawingTools import *
from mojo.UI import MultiLineView
from mojo.glyphPreview import GlyphPreview
from mojo.extensions import getExtensionDefault, setExtensionDefault, getExtensionDefaultColor, setExtensionDefaultColor
from lib.UI.splitter import Splitter
from lib.UI.enterTextEditor import EnterTextEditor
from lib.tools.misc import NSColorToRgba, rgbaToNSColor
from lib.tools.glyphList import GN2UV
from lib.UI.statusBar import StatusBar
# debug only
# import glyphConstruction
# import importlib
# importlib.reload(glyphConstruction)
from glyphConstruction import GlyphConstructionBuilder, ParseGlyphConstructionListFromString, GlyphBuilderError, ParseVariables
from glyphConstructionLexer import GlyphConstructionLexer
from glyphConstructionWindow import GlyphConstructionWindow
from lib.scripting.codeEditor import CodeEditor
import os
# Preference-key prefix for all Glyph Builder extension defaults.
defaultKey = "com.typemytype.glyphBuilder"

# Example construction rules (kept as inline reference documentation for the
# construction syntax); see the override after the string.
constructions = """# capitals
Agrave = A + grave.cap@center,top
Aacute = A + acute.cap@center,top
Acircumflex = A + circumflex.cap@center,top
Atilde = A + tilde.cap@center,top
Adieresis = A + dieresis.cap@center,top
Aring = A + ring.cap@center,top
Ccedilla = C + cedilla.cap@center, bottom
Egrave = E + grave.cap@center, top
Eacute = E + acute.cap@center, top
Ecircumflex = E + circumflex.cap@center, top
Edieresis = E + dieresis.cap@center, top
Igrave = I + grave.cap@center, top
Iacute = I + acute.cap@center, top
Icircumflex = I + circumflex.cap@center, top
Idieresis = I + dieresis.cap@center, top
Ntilde = N + tilde@center,top
Ograve = O + grave.cap@center, top
Oacute = O + acute.cap@center, top
Ocircumflex = O + circumflex.cap@center, top
Otilde = O + tilde.cap@center, top
Odieresis = O + dieresis.cap@center, top
Oslash = O + slash@center,center
Scaron = S + caron.cap@center, top
Ugrave = U + grave.cap@center, top
Uacute = U + acute.cap@center, top
Ucircumflex = U + circumflex.cap@center, top
Udieresis = U + dieresis.cap@center, top
Zcaron = Z + caron.cap@center, top
# capitals ligatures
F_L = F & L
F_I = F & I
AE = A & E@75%,origin
OE = O & E@75%,origin
# lowercase
agrave = a + grave@center,top
aacute = a + acute@center,top
acircumflex = a + circumflex@center,top
atilde = a + tilde@center,top
adieresis = a + dieresis@center,top
aring = a + ring@center,top
amacron = a + macron@center,top
abreve = a + breve@center,top
aogonek = a + ogonek@innerRight,bottom
aringacute = a + ring@center,top + acute@center,110%
ccedilla =c + cedilla@center,bottom
egrave = e + grave@center,top
eacute = e + acute@center,top
ecircumflex = e + circumflex@center,top
edieresis = e + dieresis@center,top
emacron = e + macron@center,top
ebreve = e + breve@center,top
edotaccent = e + dotaccent@center,top
eogonek = e + ogonek@center,bottom
ecaron = e + caron@center,top
igrave = dotlessi + grave@center,top
iacute = dotlessi + acute@center,top
icircumflex = dotlessi + circumflex@center,top
idieresis = dotlessi + dieresis@center,top
ograve = o + grave@center,top
oacute = o + acute@center,top
otilde = o + tilde@center,top
odieresis = o + dieresis@center,top
ohungarumlaut = o + hungarumlaut@center,top
oslash = o + slash@center,center
scaron = s + caron@center,top
yacute = y + acute@center,top
ydieresis = y + dieresis@center,top
zcaron = z + caron@center,top
# lowercase ligatures
fi = f & i
fl = f & l
f_f_i = f & f & i
f_f_l = f & f & l
ae = a & e@80%,orgin
oe = o & e@80%,orgin
# fractions
onequarter = fraction@110%,origin + one.superior@innerLeft,innerTop + four.inferior@ fraction:innerRight,fraction:innerBottom
onehalf = fraction@110%,origin + one.superior@innerLeft,innerTop + two.inferior@ fraction:innerRight,fraction:innerBottom
threequarters = fraction@110%,origin + three.superior@innerLeft,innerTop + four.inferior@ fraction:innerRight,fraction:innerBottom
percent = fraction@110%,origin + zero.superior@innerLeft,innerTop + zero.inferior@ fraction:innerRight,fraction:innerBottom
perthousand = fraction@110%,origin + zero.superior@innerLeft,innerTop + zero.inferior@ fraction:innerRight,fraction:innerBottom & zero.inferior@fraction:right,fraction:innerBottom
# some test cases
L_aringacute=L & a+ring@center,top+acute@center,top
"""
# NOTE(review): the example template above is discarded here so the editor
# starts empty — confirm this override is intentional.
constructions = ""
# Matches a "# OverwriteExistingGlyphs: True|False" directive line in the
# construction text; group 1 captures the boolean word.
overWriteRE = re.compile(
    r"^\s*"  # space before, not required
    r"#"  # command symbol is required
    r"\s*"  # space before, not required
    r"OverwriteExistingGlyphs"  # OverwriteExistingGlyphs, required, and ignore case
    r"\s*"  # space before, not required
    r":"  # : is required
    r"\s*"  # space before, not required
    r"(True|False)"  # capture True, False
    r"\s*"  # space after, not required
    r"$",  # end of line
    re.IGNORECASE | re.MULTILINE
)
# Matches a "# AutoUnicodes: True|False" directive line.
autoUnicodesRE = re.compile(
    r"^\s*"  # space before, not required
    r"#"  # command symbol is required
    r"\s*"  # space before, not required
    r"AutoUnicodes"  # AutoUnicodes, required, and ignore case
    r"\s*"  # space before, not required
    r":"  # : is required
    r"\s*"  # space before, not required
    r"(True|False)"  # capture True, False
    r"\s*"  # space after, not required
    r"$",  # end of line
    re.IGNORECASE | re.MULTILINE
)
# Matches "# MarkGlyphs: r, g, b, a" with four numeric components captured.
markGlyphRE = re.compile(
    r"^\s*"  # space before, not required
    r"#"  # command symbol is required
    r"\s*"  # space before, not required
    r"MarkGlyphs"  # MarkGlyphs, required, and ignore case
    r"\s*"  # space before, not required
    r":"  # : is required
    r"\s*"  # space before, not required
    r"([-+]?\d*\.\d+|\d+)"  # a float number
    r"\s*"  # space before, not required
    r","  # comma
    r"\s*"  # space before, not required
    r"([-+]?\d*\.\d+|\d+)"  # a float number
    r"\s*"  # space before, not required
    r","  # comma
    r"\s*"  # space before, not required
    r"([-+]?\d*\.\d+|\d+)"  # a float number
    r"\s*"  # space before, not required
    r","  # comma
    r"\s*"  # space before, not required
    r"([-+]?\d*\.\d+|\d+)"  # a float number
    r"\s*"  # space after, not required
    r"$",  # end of line
    re.IGNORECASE | re.MULTILINE
)
# Matches "# MarkGlyphs: False" — disables glyph marking entirely.
dontMarkGlyphRE = re.compile(
    r"^\s*"  # space before, not required
    r"#"  # command symbol is required
    r"\s*"  # space before, not required
    r"MarkGlyphs"  # MarkGlyphs, required, and ignore case
    r"\s*"  # space before, not required
    r":"  # : is required
    r"\s*"  # space before, not required
    r"(False)"  # capture True, False
    r"\s*"  # space after, not required
    r"$",  # end of line
    re.IGNORECASE | re.MULTILINE
)
class GlyphConstructorFont(object):
    """Font wrapper that overlays freshly built glyphs on top of a font.

    Glyphs built during the current session live in ``glyphsDone`` and shadow
    same-named glyphs in the wrapped font; every other attribute access is
    proxied through to the underlying font object.
    """

    def __init__(self, font):
        self.font = font
        # name -> glyph built this session; takes precedence over self.font
        self.glyphsDone = {}

    def __getattr__(self, attr):
        # Delegate anything we don't define ourselves to the wrapped font.
        return getattr(self.font, attr)

    def __getitem__(self, glyphName):
        if glyphName in self.glyphsDone:
            return self.glyphsDone[glyphName]
        return self.font[glyphName]

    def __contains__(self, glyphName):
        if glyphName in self.glyphsDone:
            return True
        return glyphName in self.font

    def __len__(self):
        return len(self.keys())

    def keys(self):
        return set(list(self.font.keys()) + list(self.glyphsDone.keys()))

    def __iter__(self):
        # Bug fix: keys() returns a set, which the previous implementation
        # indexed (names[0]) and sliced (names[1:]) — both raise TypeError.
        # Iterate the name set directly instead.
        for name in self.keys():
            yield self[name]
class AnalyserTextEditor(EnterTextEditor):
    """Borderless read-only text view (Menlo 10) used for the analysis report."""

    def __init__(self, *args, **kwargs):
        super(AnalyserTextEditor, self).__init__(*args, **kwargs)
        self.getNSScrollView().setBorderType_(NSNoBorder)
        try:
            self.getNSTextView().setUsesFindBar_(True)
        except Exception:
            # Older AppKit has no find bar; fall back to the find panel.
            self.getNSTextView().setUsesFindPanel_(True)
        # basicAttrs = getBasicTextAttributes()
        font = NSFont.fontWithName_size_("Menlo", 10)
        # self.getNSTextView().setTypingAttributes_(basicAttrs)
        self.getNSTextView().setFont_(font)
def _stringDict(d, verb):
out = []
for key in sorted(d.keys()):
value = d[key]
out.append("\tGlyph %s %s %s" % (key, verb, ", ".join(value)))
return "\n".join(out)
def analyseConstructions(font, constructionGlyphs):
    """Compare construction results against the glyphs already in *font*.

    Returns a human-readable, multi-section report listing duplicate
    construction entries, glyphs missing from the font, glyphs already
    present, and existing glyphs whose component lists disagree with
    their construction (missing or extra components).
    """
    def _formatComponentDict(d, verb):
        # One "\tGlyph <name> <verb> <comp, comp>" line per glyph, sorted.
        return "\n".join(
            "\tGlyph %s %s %s" % (key, verb, ", ".join(d[key]))
            for key in sorted(d)
        )

    missingGlyphs = []
    notMissingGlyphs = []
    missingComponents = {}
    unusedComponents = {}
    doubleEntries = []
    done = []
    for construction in constructionGlyphs:
        if construction.name is None:
            continue
        if construction.name not in font:
            missingGlyphs.append(construction.name)
            continue
        notMissingGlyphs.append(construction.name)
        if construction.name in done:
            doubleEntries.append(construction.name)
        done.append(construction.name)
        glyph = font[construction.name]
        glyphComponentNames = [component.baseGlyph for component in glyph.components]
        constructionComponentNames = [component.baseGlyph for component in construction.components]
        if glyphComponentNames == constructionComponentNames:
            # same values in the same order
            continue
        # Components on the font glyph that the construction does not use.
        remaining = list(constructionComponentNames)
        for name in glyphComponentNames:
            if name in remaining:
                remaining.remove(name)
            else:
                # Bug fix: the guard used to test the *component* name against
                # the dict instead of the glyph name, which reset the list on
                # every new component and could raise KeyError.
                unusedComponents.setdefault(glyph.name, []).append(name)
        # Components the construction needs but the font glyph lacks.
        remaining = list(glyphComponentNames)
        for name in constructionComponentNames:
            if name in remaining:
                remaining.remove(name)
            else:
                # Bug fix: used to test membership in unusedComponents (wrong
                # dict, wrong key), losing previously collected entries.
                missingComponents.setdefault(glyph.name, []).append(name)
    text = []
    if doubleEntries:
        text += [
            "Double entries:",
            "---------------",
            "\t" + "\n\t".join(doubleEntries),
            "\n"
        ]
    if missingGlyphs:
        text += [
            "Missing Glyphs:",
            "---------------",
            "\t" + "\n\t".join(missingGlyphs),
            "\n"
        ]
    if notMissingGlyphs:
        text += [
            "Existing Glyphs:",
            "---------------",
            "\t" + "\n\t".join(notMissingGlyphs),
            "\n"
        ]
    if missingComponents:
        text += [
            "Existing Glyphs with Missing Components:",
            "----------------------------------------",
            _formatComponentDict(missingComponents, "is missing"),
            "\n"
        ]
    if unusedComponents:
        text += [
            "Existing Glyphs with different components:",
            "------------------------------------------",
            _formatComponentDict(unusedComponents, "has no"),
            "\n"
        ]
    return "\n".join(text)
class BuildGlyphsSheet(BaseWindowController):
    """Modal sheet with build options (overwrite, auto unicodes, mark color)
    that generates the parsed constructions into the font."""

    # Extension-default keys for the sheet's persisted options.
    overWriteKey = "%s.overWrite" % defaultKey
    useMarkColorKey = "%s.useMarkColor" % defaultKey
    markColorKey = "%s.markColor" % defaultKey
    autoUnicodesKey = "%s.autoUnicodes" % defaultKey

    def __init__(self, constructions, font, parentWindow, shouldOverWrite=None, shouldAutoUnicodes=None, shouldUseMarkColor=None):
        self.font = font
        self.constructions = constructions
        self.w = Sheet((300, 170), parentWindow=parentWindow)
        # NOTE(review): the next line is a no-op expression (names are looked
        # up and discarded) — looks like a leftover; consider removing.
        getExtensionDefault, setExtensionDefault, getExtensionDefaultColor, setExtensionDefaultColor
        y = 15
        if shouldOverWrite is None:
            shouldOverWrite = getExtensionDefault(self.overWriteKey, True)
        self.w.overWrite = CheckBox((15, y, 200, 22), "Overwrite Existing Glyphs", value=shouldOverWrite)
        y += 35
        if shouldAutoUnicodes is None:
            shouldAutoUnicodes = getExtensionDefault(self.autoUnicodesKey, True)
        self.w.autoUnicodes = CheckBox((15, y, 200, 22), "Auto Unicodes", value=shouldAutoUnicodes)
        y += 35
        if shouldUseMarkColor is None:
            shouldUseMarkColor = getExtensionDefault(self.useMarkColorKey, True)
            markColor = getExtensionDefaultColor(self.markColorKey, NSColor.redColor())
        elif not shouldUseMarkColor:
            markColor = getExtensionDefaultColor(self.markColorKey, NSColor.redColor())
        else:
            # shouldUseMarkColor carries an (r, g, b, a) tuple in this branch.
            markColor = rgbaToNSColor(shouldUseMarkColor)
            shouldUseMarkColor = True
        self.w.markGlyphs = CheckBox((15, y, 200, 22), "Mark Glyphs", value=shouldUseMarkColor, callback=self.markGlyphsCallback)
        self.w.markGlyphColor = ColorWell((130, y - 5, 50, 30), color=markColor)
        # NOTE(review): enabling keyed off overWriteKey, not useMarkColorKey —
        # confirm this is intentional.
        self.w.markGlyphColor.enable(getExtensionDefault(self.overWriteKey, True))
        self.w.okButton = Button((-70, -30, -15, 20), "Build", callback=self.buildCallback, sizeStyle="small")
        self.w.setDefaultButton(self.w.okButton)
        self.w.closeButton = Button((-140, -30, -80, 20), "Cancel", callback=self.closeCallback, sizeStyle="small")
        self.w.closeButton.bind(".", ["command"])
        self.w.closeButton.bind(unichr(27), [])
        self.w.open()

    def markGlyphsCallback(self, sender):
        # The color well is only meaningful while "Mark Glyphs" is checked.
        self.w.markGlyphColor.enable(sender.get())

    def buildCallback(self, sender):
        """Build every construction into the font, honoring the sheet options."""
        overWrite = self.w.overWrite.get()
        markColor = None
        if self.w.markGlyphs.get():
            markColor = NSColorToRgba(self.w.markGlyphColor.get())
        characterMap = None
        if self.w.autoUnicodes.get():
            characterMap = GN2UV
        progress = self.startProgress("Building Glyphs...", len(self.constructions))
        font = self.font
        # Batch the notifications so observers fire once, after the build.
        font.naked().holdNotifications()
        for construction in self.constructions:
            progress.update()
            if construction.name is None:
                continue
            if construction.name in font and not overWrite:
                continue
            glyph = self.font.newGlyph(construction.name)
            glyph.clear()
            glyph.width = construction.width
            # Prefer an explicit unicode, fall back to the character map.
            if construction.unicode is not None:
                glyph.unicode = construction.unicode
            elif characterMap and construction.name in characterMap:
                glyph.unicode = characterMap[construction.name]
            else:
                glyph.unicode = None
            glyph.note = construction.note
            construction.draw(glyph.getPen())
            if construction.markColor:
                glyph.markColor = tuple(construction.markColor)
            elif markColor:
                glyph.markColor = markColor
        font.naked().releaseHeldNotifications()
        progress.close()
        self.closeCallback(sender)

    def closeCallback(self, sender):
        """Persist the chosen options as extension defaults and dismiss."""
        overWrite = self.w.overWrite.get()
        autoUnicodes = self.w.autoUnicodes.get()
        useMarkColor = self.w.markGlyphs.get()
        markColor = None
        if useMarkColor:
            markColor = self.w.markGlyphColor.get()
        setExtensionDefault(self.overWriteKey, bool(overWrite))
        setExtensionDefault(self.autoUnicodesKey, bool(autoUnicodes))
        setExtensionDefault(self.useMarkColorKey, bool(useMarkColor))
        if markColor is not None:
            setExtensionDefaultColor(self.markColorKey, markColor)
        self.w.close()
class GlyphBuilderController(BaseWindowController):
    """Main Glyph Builder window controller: construction editor, live
    preview, and analysis pane."""

    # Extension-default key for the last saved construction file name.
    fileNameKey = "%s.lastSavedFileName" % defaultKey
    # Glyph-lib key storing the construction string a glyph was built from.
    glyphLibConstructionKey = "%s.construction" % defaultKey
def __init__(self, font):
    """Build the Glyph Builder window: toolbar, code editor, multi-line
    preview, analyser split view and status bar; then subscribe to *font*."""
    self.font = None
    self._glyphs = []
    self._filePath = None
    statusBarHeight = 20
    self.w = GlyphConstructionWindow((900, 700), "Glyph Builder", minSize=(400, 400))
    self.w.getNSWindow().setSave_saveAsCallback_(self.saveFile, self.saveFileAs)
    self.w.getNSWindow().setCollectionBehavior_(128)  # NSWindowCollectionBehaviorFullScreenPrimary
    toolbarItems = [
        dict(
            itemIdentifier="save",
            label="Save",
            imageNamed="toolbarScriptSave",
            callback=self.saveFile,
        ),
        dict(
            itemIdentifier="open",
            label="Open",
            imageNamed="toolbarScriptOpen",
            callback=self.openFile,
        ),
        dict(itemIdentifier=NSToolbarSpaceItemIdentifier),
        dict(
            itemIdentifier="reload",
            label="Update",
            imageNamed="toolbarScriptReload",
            callback=self.reload,
        ),
        dict(itemIdentifier=NSToolbarSpaceItemIdentifier),
        dict(
            itemIdentifier="analyse",
            label="Analyse",
            imageNamed="prefToolbarSort",
            callback=self.analyse,
        ),
        dict(itemIdentifier=NSToolbarFlexibleSpaceItemIdentifier),
        dict(
            itemIdentifier="buildGlyphs",
            label="Build Glyphs",
            imageNamed="toolbarRun",
            callback=self.generateGlyphs
        ),
    ]
    self.w.addToolbar(toolbarIdentifier="GlyphBuilderControllerToolbar", toolbarItems=toolbarItems, addStandardItems=False)
    self.constructions = CodeEditor((0, 0, -0, -0), constructions, lexer=GlyphConstructionLexer())
    # self.constructions.wrapWord(False)  # in only availbel in the RoboFont 1.7 beta
    self.constructions.getNSScrollView().setBorderType_(NSNoBorder)
    self.preview = MultiLineView(
        (0, 0, -0, -0),
        pointSize=50,
        lineHeight=500,
        applyKerning=False,
        displayOptions={
            "Beam": False,
            "displayMode": "Multi Line"
        },
        selectionCallback=self.previewSelectionCallback
    )
    self.analyser = AnalyserTextEditor((0, 0, -0, -0), readOnly=True)
    self.analyserPreview = Group((0, 0, -0, -0))
    # Black preview for the constructed glyph, red for the existing original.
    constructionColor = NSColor.colorWithCalibratedRed_green_blue_alpha_(0, 0, 0, .6)
    self.analyserPreview.construction = GlyphPreview((0, 0, -0, -0), contourColor=constructionColor, componentColor=constructionColor)
    self.analyserPreview.construction.getNSView()._buffer = 100
    originColor = NSColor.colorWithCalibratedRed_green_blue_alpha_(1, 0, 0, .6)
    self.analyserPreview.origin = GlyphPreview((0, 0, -0, -0), contourColor=originColor, componentColor=originColor)
    self.analyserPreview.origin.getNSView()._buffer = 100
    self.analyserPreview.build = Button((10, -30, -10, 20), "Build", sizeStyle="small", callback=self.buildSingleGlyph)
    self.analyserPreview.build.enable(False)
    paneDescriptions = [
        dict(view=self.analyser, identifier="analyserText", canCollapse=False, minSize=100),
        dict(view=self.analyserPreview, identifier="analyserPreview", canCollapse=False, minSize=100),
    ]
    self.analyserSplit = Splitter((0, 0, -0, -statusBarHeight), paneDescriptions=paneDescriptions, drawBorderLine=False, isVertical=False, dividerThickness=1)
    paneDescriptions = [
        dict(view=self.constructions, identifier="constructions", canCollapse=False, minSize=200, maxSize=600, liveResizeable=False),
        dict(view=self.preview, identifier="preview", canCollapse=False, minSize=300, liveResizeable=True),
        dict(view=self.analyserSplit, identifier="analyser", canCollapse=True, minSize=100, maxSize=300, liveResizeable=False)
    ]
    self.w.split = Splitter((0, 0, -0, -statusBarHeight), paneDescriptions=paneDescriptions, drawBorderLine=False, dividerThickness=1)
    # self.w.split.showPane("analyser", True)
    self.w.statusBar = StatusBar((0, -statusBarHeight, -0, statusBarHeight))
    # Invisible buttons give the window cmd-return / cmd-s key equivalents.
    self.w.statusBar.hiddenReload = Button((0, 0, -0, -0), "Reload", self.reload)
    button = self.w.statusBar.hiddenReload.getNSButton()
    button.setBezelStyle_(NSRoundRectBezelStyle)
    button.setAlphaValue_(0)
    self.w.statusBar.hiddenReload.bind("\r", ["command"])
    # NOTE(review): titled "Reload" but wired to saveFile; harmless since
    # the button is invisible (alpha 0), but the title looks like a typo.
    self.w.statusBar.hiddenSave = Button((0, 0, -0, -0), "Reload", self.saveFile)
    button = self.w.statusBar.hiddenSave.getNSButton()
    button.setBezelStyle_(NSRoundRectBezelStyle)
    button.setAlphaValue_(0)
    self.w.statusBar.hiddenSave.bind("s", ["command"])
    self.subscribeFont(font)
    self.setUpBaseWindowBehavior()
    addObserver(self, "fontBecameCurrent", "fontBecameCurrent")
    addObserver(self, "fontResignCurrent", "fontResignCurrent")
    self.w.open()
def subscribeFont(self, font):
    """Attach to *font*: show it in the preview, observe its changes, and
    re-run the construction parsing."""
    self.unsubscribeFont()
    self.font = font
    if font is not None:
        self.preview.setFont(font)
        self.font.naked().addObserver(self, "fontChanged", "Font.Changed")
        self.constructionsCallback(self.constructions)
def unsubscribeFont(self):
    """Detach from the current font and clear the preview."""
    if self.font is not None:
        self.preview.setFont(None)
        self.preview.set([])
        # NOTE(review): observer was added on self.font.naked() but removed
        # on self.font here — confirm both target the same dispatcher.
        self.font.removeObserver(self, notification="Font.Changed")
        self.font = None
def constructionsCallback(self, sender, update=True):
    """Re-parse the construction text, rebuild every glyph into a fresh
    GlyphConstructorFont overlay, and refresh preview + analysis."""
    if self.font is None:
        return
    font = self.font.naked()
    self.glyphConstructorFont = GlyphConstructorFont(font)
    self._glyphs = []
    errors = []
    try:
        constructions = ParseGlyphConstructionListFromString(sender.get(), font)
    except GlyphBuilderError as err:
        constructions = []
        errors.append(str(err))
    for construction in constructions:
        if not construction:
            # Empty entry marks a new line in the multi-line preview.
            glyph = self.preview.createNewLineGlyph()
        elif construction in self.glyphConstructorFont.glyphsDone:
            glyph = self.glyphConstructorFont.glyphsDone[construction]
        else:
            try:
                constructionGlyph = GlyphConstructionBuilder(construction, self.glyphConstructorFont, characterMap=None)
            except GlyphBuilderError as err:
                errors.append(str(err))
                continue
            if constructionGlyph.name is None:
                errors.append(construction)
                continue
            # Glyph-object instantiation differs between RoboFont 1.x and 2.x.
            if RoboFontVersion < "2.0":
                glyph = font._instantiateGlyphObject()
            else:
                glyph = font.layers.defaultLayer.instantiateGlyphObject()
            # Remember which construction produced this glyph (used by the
            # preview selection to highlight the matching source line).
            glyph.lib[self.glyphLibConstructionKey] = construction
            glyph.name = constructionGlyph.name
            glyph.unicode = constructionGlyph.unicode
            glyph.note = constructionGlyph.note
            glyph.markColor = constructionGlyph.markColor
            if RoboFontVersion < "2.0":
                glyph.setParent(self.glyphConstructorFont)
                glyph.dispatcher = font.dispatcher
            else:
                glyph._font = weakref.ref(self.glyphConstructorFont)
                # glyph._dispatcher = font._dispatcher
            glyph.width = constructionGlyph.width
            constructionGlyph.draw(glyph.getPen())
            self.glyphConstructorFont.glyphsDone[glyph.name] = glyph
        self._glyphs.append(glyph)
    if errors:
        print("Errors:")
        print("\n".join(errors))
    if update:
        self.preview.set(self._glyphs)
    self.analyser.set(analyseConstructions(font, self._glyphs))
# preview
def previewSelectionCallback(self, sender):
    """Update the status bar and analyser views for the glyph selected in the preview."""
    def _niceNumber(value):
        # Render integral values without a decimal part, others with 2 digits.
        i = int(value)
        if i == value:
            return "%i" % value
        else:
            return "%.2f" % value
    glyph = sender.getSelectedGlyph()
    if glyph is not None and glyph.name is None:
        # New-line placeholder glyphs have no name; treat as no selection.
        glyph = None
    status = []
    if glyph is not None:
        width = _niceNumber(glyph.width)
        leftMargin = _niceNumber(glyph.leftMargin)
        rightMargin = _niceNumber(glyph.rightMargin)
        status = [
            glyph.name,
            "width: %s left: %s right: %s" % (width, leftMargin, rightMargin),
            "components: %s" % (", ".join([component.baseGlyph for component in glyph.components]))
        ]
        if glyph.unicode:
            status.append("unicode: %04X" % glyph.unicode)
        if glyph.note:
            # Truncate long notes to 30 chars and append an ellipsis.
            # NOTE(review): `unichr` is the Python 2 builtin (or comes from a
            # py23 compat import not visible here); plain Python 3 would need
            # `chr` — confirm the import source.
            status.append("note: %s" % (glyph.note[:30] + (glyph.note[30:] and unichr(0x2026))))
        if glyph.markColor:
            status.append("mark: %s" % ", ".join([str(c) for c in glyph.markColor]))
        rawConstructions = self.constructions.get()
        searchConstruction = glyph.lib.get(self.glyphLibConstructionKey)
        if searchConstruction is not None:
            if searchConstruction not in rawConstructions:
                # The stored rule may have had variables substituted; map the
                # values back to {variable} form before searching the editor text.
                _, variables = ParseVariables(rawConstructions)
                for variableName, variableValue in variables.items():
                    searchConstruction = searchConstruction.replace(variableValue, "{%s}" % variableName)
            if searchConstruction in rawConstructions:
                # Highlight the matching rule in the constructions editor.
                selectedRange = NSMakeRange(rawConstructions.index(searchConstruction), len(searchConstruction))
                self.constructions.getNSTextView().setSelectedRange_(selectedRange)
    self.w.statusBar.set(status)
    self.analyserPreview.construction.setGlyph(glyph)
    self.analyserPreview.build.enable(glyph is not None)
    if glyph is not None:
        self.analyserPreview.build.setTitle("Build %s" % glyph.name)
    else:
        self.analyserPreview.build.setTitle("Build")
    if glyph is not None and glyph.name in self.font:
        self.analyserPreview.origin.setGlyph(self.font[glyph.name])
    else:
        self.analyserPreview.origin.setGlyph(None)
def buildSingleGlyph(self, sender):
    """Build the glyph selected in the preview into the current font, replacing any existing outline."""
    glyph = self.preview.getSelectedGlyph()
    if glyph is None or self.font is None:
        return
    target = self.font.newGlyph(glyph.name)
    target.clear()
    glyph.draw(target.getPen())
    # Carry the glyph metadata over to the freshly built glyph.
    target.unicode = glyph.unicode
    target.note = glyph.note
    if glyph.markColor:
        target.markColor = glyph.markColor
    target.width = glyph.width
# toolbar
def generateGlyphs(self, sender):
    """Toolbar action: re-parse the rules and open the sheet that writes glyphs into the font.

    Option flags (overwrite, auto-unicodes, mark color / don't-mark) are read
    from the raw construction text via module-level regexes defined elsewhere
    in this file.
    """
    # Re-parse without touching the preview UI.
    self.reload(update=False)
    if not self._glyphs:
        return
    if self.font is None:
        return
    rawConstructions = self.constructions.get()
    # Each result is either a falsy non-match or is coerced to a bool/tuple.
    overWriteResult = overWriteRE.search(rawConstructions)
    if overWriteResult:
        overWriteResult = overWriteResult.groups()[0].strip().lower() == "true"
    autoUnicodesResult = autoUnicodesRE.search(rawConstructions)
    if autoUnicodesResult:
        autoUnicodesResult = autoUnicodesResult.groups()[0].strip().lower() == "true"
    dontMarkGlyphResult = dontMarkGlyphRE.search(rawConstructions)
    if dontMarkGlyphResult:
        # An explicit "don't mark" flag wins over any mark color setting.
        markGlyphResult = False
    else:
        markGlyphResult = markGlyphRE.search(rawConstructions)
        if markGlyphResult:
            try:
                # Expect four r, g, b, a float groups; on failure keep the raw match.
                markGlyphResult = float(markGlyphResult.groups()[0]), float(markGlyphResult.groups()[1]), float(markGlyphResult.groups()[2]), float(markGlyphResult.groups()[3])
            except Exception:
                pass
    BuildGlyphsSheet(self._glyphs, self.font, self.w, shouldOverWrite=overWriteResult, shouldAutoUnicodes=autoUnicodesResult, shouldUseMarkColor=markGlyphResult)
def reload(self, sender=None, update=True):
    """Re-run the constructions parser; *update* is forwarded to constructionsCallback."""
    self.constructionsCallback(self.constructions, update=update)
def _saveFile(self, path):
    """Write the construction rules to *path* (UTF-8) and remember the location.

    Also stores the bare file name in the font lib under self.fileNameKey so
    the next save can suggest it as the default name.
    """
    if self.font is not None:
        self.font.lib[self.fileNameKey] = os.path.splitext(os.path.basename(path))[0]
    txt = self.constructions.get()
    # Fix: use a context manager so the handle is closed even if write() raises.
    with open(path, "w", encoding="utf-8") as f:
        f.write(txt)
    self._filePath = path
def saveFile(self, sender=None):
    """Save the rules, asking for a destination first if none has been set yet."""
    if self._filePath is None:
        preferredName = None
        if self.font is not None and self.font.path is not None:
            # Default to the font's file name...
            preferredName = os.path.splitext(os.path.basename(self.font.path))[0]
            if self.fileNameKey in self.font.lib.keys():
                # see if we have saved this file before and use that as first choice
                preferredName = self.font.lib.get(self.fileNameKey)
        self.showPutFile(["glyphConstruction"], fileName=preferredName, callback=self._saveFile)
    else:
        self._saveFile(self._filePath)
def saveFileAs(self, sender=None):
    """Forget the current path so saveFile() prompts for a new destination."""
    self._filePath = None
    self.saveFile(sender)
def setFile(self, path):
    """Load construction rules from *path* (UTF-8) into the editor and remember the path."""
    # Fix: context manager guarantees the handle is closed on read errors too.
    with open(path, "r", encoding="utf-8") as f:
        txt = f.read()
    self.constructions.set(txt)
    self._filePath = path
def _openFile(self, paths):
    """Get-file dialog callback: open the first chosen path, if any."""
    if paths:
        path = paths[0]
        self.setFile(path)
def openFile(self, sender=None):
    """Show an open dialog, pre-filled with the font's previously used rules file."""
    directory = fileName = None
    if self.font is not None and self.font.path is not None:
        if self.fileNameKey in self.font.lib.keys():
            fileName = self.font.lib.get(self.fileNameKey, "")
            fileName += ".glyphConstruction"
            directory = os.path.dirname(self.font.path)
            fileName = os.path.join(directory, fileName)
            # The full path is carried in fileName, so drop the separate directory.
            directory = None
    getFile(fileTypes=["glyphConstruction"], parentWindow=self.w.getNSWindow(), directory=directory, fileName=fileName, resultCallback=self._openFile)
    # self.showGetFile(["glyphConstruction"], callback=self._openFile)
def analyse(self, sender=None):
    """Open the analyser pane and refresh its contents."""
    self.w.split.togglePane("analyser", False)
    self.reload()
# notifications
def fontChanged(self, notification):
    """Font.Changed observer: rebuild the preview from the current rules."""
    self.reload()
def fontBecameCurrent(self, notification):
    """App notification: attach the UI to the newly current font."""
    font = notification["font"]
    self.subscribeFont(font)
def fontResignCurrent(self, notification):
    """App notification: detach from the font that stopped being current."""
    self.unsubscribeFont()
def windowCloseCallback(self, sender):
    """Tear down font and app observers when the window closes."""
    self.unsubscribeFont()
    removeObserver(self, "fontBecameCurrent")
    removeObserver(self, "fontResignCurrent")
    super(GlyphBuilderController, self).windowCloseCallback(sender)
if __name__ == '__main__':
    # Run directly (inside RoboFont): open the window for the current font.
    GlyphBuilderController(CurrentFont())
| {
"repo_name": "typemytype/GlyphConstruction",
"path": "GlyphConstruction.roboFontExt/lib/glyphConstructionController.py",
"copies": "3",
"size": "31785",
"license": "mit",
"hash": 5952169880683015000,
"line_mean": 34.9966024915,
"line_max": 180,
"alpha_frac": 0.6057574328,
"autogenerated": false,
"ratio": 4.016300227445034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6122057660245034,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import *
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from ._h_e_a_d import mac_epoch_diff
from . import DefaultTable
import time
import calendar
FFTMFormat = """
> # big endian
version: I
FFTimeStamp: Q
sourceCreated: Q
sourceModified: Q
"""
class table_F_F_T_M_(DefaultTable.DefaultTable):
    """FontForge time stamp table ('FFTM')."""

    def decompile(self, data, ttFont):
        # Unpack the fixed-size record; any trailing bytes are ignored.
        dummy, rest = sstruct.unpack2(FFTMFormat, data, self)

    def compile(self, ttFont):
        data = sstruct.pack(FFTMFormat, self)
        return data

    def toXML(self, writer, ttFont):
        writer.comment("FontForge's timestamp, font source creation and modification dates")
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(FFTMFormat)
        for name in names:
            value = getattr(self, name)
            if name in ("FFTimeStamp", "sourceCreated", "sourceModified"):
                # Timestamps are seconds since the Mac epoch (1904); clamp to
                # the Unix epoch when the shifted value would be negative.
                try:
                    value = time.asctime(time.gmtime(max(0, value + mac_epoch_diff)))
                except ValueError:
                    value = time.asctime(time.gmtime(0))
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        value = attrs["value"]
        if name in ("FFTimeStamp", "sourceCreated", "sourceModified"):
            # Parse the asctime string back into seconds since the Mac epoch.
            value = calendar.timegm(time.strptime(value)) - mac_epoch_diff
        else:
            value = safeEval(value)
        setattr(self, name, value)
"repo_name": "googlei18n/fontuley",
"path": "src/third_party/fontTools/Lib/fontTools/ttLib/tables/F_F_T_M_.py",
"copies": "7",
"size": "1408",
"license": "apache-2.0",
"hash": 5668241076642348000,
"line_mean": 29.6304347826,
"line_max": 88,
"alpha_frac": 0.6803977273,
"autogenerated": false,
"ratio": 3.52,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7700397727299999,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import *
import os
import time
from xml.etree import ElementTree
# ----------------------
# Very Simple XML Writer
# ----------------------
class XMLWriter(object):
    """Minimal XML writer built on xml.etree.ElementTree.

    Elements are opened/closed with begintag()/endtag(); compile() serializes
    the whole (indented) tree and returns it as a text string.
    """

    def __init__(self):
        self._root = None       # first element ever opened
        self._elements = []     # stack of currently open elements

    def simpletag(self, tag, **kwargs):
        """Add an empty child element to the currently open element."""
        ElementTree.SubElement(self._elements[-1], tag, **kwargs)

    def begintag(self, tag, **kwargs):
        """Open a new element; it becomes the document root if none exists yet."""
        if self._elements:
            s = ElementTree.SubElement(self._elements[-1], tag, **kwargs)
        else:
            s = ElementTree.Element(tag, **kwargs)
        if self._root is None:
            self._root = s
        self._elements.append(s)

    def endtag(self, tag):
        """Close the innermost open element; *tag* must match it."""
        assert self._elements[-1].tag == tag
        del self._elements[-1]

    def write(self, text):
        """Append character data to the currently open element."""
        if self._elements[-1].text is None:
            self._elements[-1].text = text
        else:
            self._elements[-1].text += text

    def compile(self, encoding="utf-8"):
        """Serialize the tree and return it as a str.

        Fix: ElementTree.write() emits *bytes* for any real codec, which broke
        the previous text-StringIO target under Python 3 — write into a
        BytesIO and decode the result instead.
        """
        from io import BytesIO
        f = BytesIO()
        tree = ElementTree.ElementTree(self._root)
        indent(tree.getroot())
        tree.write(f, encoding=encoding)
        return f.getvalue().decode(encoding)
def indent(elem, level=0):
    """Pretty-print *elem* in place, one tab per nesting level.

    Adapted from the helper in effbot's ElementTree library.
    """
    pad = "\n" + "\t" * level
    if len(elem):
        if not (elem.text and elem.text.strip()):
            elem.text = pad + "\t"
        for child in elem:
            indent(child, level + 1)
            if not (child.tail and child.tail.strip()):
                child.tail = pad
    if level and not (elem.tail and elem.tail.strip()):
        elem.tail = pad
# ------------
# HTML Helpers
# ------------
defaultCSS = """
body {
background-color: #e5e5e5;
padding: 15px 15px 0px 15px;
margin: 0px;
font-family: Helvetica, Verdana, Arial, sans-serif;
}
h2.readError {
background-color: red;
color: white;
margin: 20px 15px 20px 15px;
padding: 10px;
border-radius: 5px;
-webkit-border-radius: 5px;
-moz-border-radius: 5px;
-webkit-box-shadow: #999 0 2px 5px;
-moz-box-shadow: #999 0 2px 5px;
font-size: 25px;
}
/* info blocks */
.infoBlock {
background-color: white;
margin: 0px 0px 15px 0px;
padding: 15px;
border-radius: 5px;
-webkit-border-radius: 5px;
-moz-border-radius: 5px;
-webkit-box-shadow: rgba(0, 0, 0, .3) 0 2px 5px;
-moz-box-shadow: rgba(0, 0, 0, .3) 0 2px 5px;
}
h3.infoBlockTitle {
font-size: 20px;
margin: 0px 0px 15px 0px;
padding: 0px 0px 10px 0px;
border-bottom: 1px solid #e5e5e5;
}
h4.infoBlockTitle {
font-size: 17px;
margin: 0px 0px 15px 0px;
padding: 0px 0px 10px 0px;
border-bottom: 1px solid #e5e5e5;
}
table.report {
border-collapse: collapse;
width: 100%;
font-size: 14px;
}
table.report tr {
border-top: 1px solid white;
}
table.report tr.testPass, table.report tr.testReportPass {
background-color: #c8ffaf;
}
table.report tr.testError, table.report tr.testReportError {
background-color: #ffc3af;
}
table.report tr.testWarning, table.report tr.testReportWarning {
background-color: #ffe1af;
}
table.report tr.testNote, table.report tr.testReportNote {
background-color: #96e1ff;
}
table.report tr.testTraceback, table.report tr.testReportTraceback {
background-color: red;
color: white;
}
table.report td {
padding: 7px 5px 7px 5px;
vertical-align: top;
}
table.report td.title {
width: 80px;
text-align: right;
font-weight: bold;
text-transform: uppercase;
}
table.report td.testReportResultCount {
width: 100px;
}
table.report td.toggleButton {
text-align: center;
width: 50px;
border-left: 1px solid white;
cursor: pointer;
}
.infoBlock td p.info {
font-size: 12px;
font-style: italic;
margin: 5px 0px 0px 0px;
}
/* SFNT table */
table.sfntTableData {
font-size: 14px;
width: 100%;
border-collapse: collapse;
padding: 0px;
}
table.sfntTableData th {
padding: 5px 0px 5px 0px;
text-align: left
}
table.sfntTableData tr.uncompressed {
background-color: #ffc3af;
}
table.sfntTableData td {
width: 20%;
padding: 5px 0px 5px 0px;
border: 1px solid #e5e5e5;
border-left: none;
border-right: none;
font-family: Consolas, Menlo, "Vera Mono", Monaco, monospace;
}
pre {
font-size: 12px;
font-family: Consolas, Menlo, "Vera Mono", Monaco, monospace;
margin: 0px;
padding: 0px;
}
/* Metadata */
.metadataElement {
background: rgba(0, 0, 0, 0.03);
margin: 10px 0px 10px 0px;
border: 2px solid #d8d8d8;
padding: 10px;
}
h5.metadata {
font-size: 14px;
margin: 5px 0px 10px 0px;
padding: 0px 0px 5px 0px;
border-bottom: 1px solid #d8d8d8;
}
h6.metadata {
font-size: 12px;
font-weight: normal;
margin: 10px 0px 10px 0px;
padding: 0px 0px 5px 0px;
border-bottom: 1px solid #d8d8d8;
}
table.metadata {
font-size: 12px;
width: 100%;
border-collapse: collapse;
padding: 0px;
}
table.metadata td.key {
width: 5em;
padding: 5px 5px 5px 0px;
border-right: 1px solid #d8d8d8;
text-align: right;
vertical-align: top;
}
table.metadata td.value {
padding: 5px 0px 5px 5px;
border-left: 1px solid #d8d8d8;
text-align: left;
vertical-align: top;
}
p.metadata {
font-size: 12px;
font-style: italic;
}
/* Proof */
/* proof: @font-face rule */
p.characterSet {
/* proof: @font-face font-family */
line-height: 135%;
word-wrap: break-word;
margin: 0px;
padding: 0px;
}
p.sampleText {
/* proof: @font-face font-family */
line-height: 135%;
margin: .5em 0px 0px 0px;
padding: .5em 0px 0px 0px;
border-top: 1px solid #e5e5e5;
}
"""
defaultJavascript = """
//<![CDATA[
function testResultToggleButtonHit(buttonID, className) {
// change the button title
var element = document.getElementById(buttonID);
if (element.innerHTML == "Show" ) {
element.innerHTML = "Hide";
}
else {
element.innerHTML = "Show";
}
// toggle the elements
var elements = getTestResults(className);
for (var e = 0; e < elements.length; ++e) {
toggleElement(elements[e]);
}
// toggle the info blocks
toggleInfoBlocks();
}
function getTestResults(className) {
var rows = document.getElementsByTagName("tr");
var found = Array();
for (var r = 0; r < rows.length; ++r) {
var row = rows[r];
if (row.className == className) {
found[found.length] = row;
}
}
return found;
}
function toggleElement(element) {
if (element.style.display != "none" ) {
element.style.display = "none";
}
else {
element.style.display = "";
}
}
function toggleInfoBlocks() {
var tables = document.getElementsByTagName("table")
for (var t = 0; t < tables.length; ++t) {
var table = tables[t];
if (table.className == "report") {
var haveVisibleRow = false;
var rows = table.rows;
for (var r = 0; r < rows.length; ++r) {
var row = rows[r];
if (row.style.display == "none") {
var i = 0;
}
else {
haveVisibleRow = true;
}
}
var div = table.parentNode;
if (haveVisibleRow == true) {
div.style.display = "";
}
else {
div.style.display = "none";
}
}
}
}
//]]>
"""
def startHTML(title=None, cssReplacements={}):
    """Begin an XHTML report document; return the XMLWriter positioned inside <body>.

    cssReplacements maps literal substrings of the default CSS to their
    replacements. NOTE: the mutable default dict is shared across calls; it is
    only read here, so this is safe as long as callers never mutate it.
    """
    writer = XMLWriter()
    # start the html
    writer.begintag("html", xmlns="http://www.w3.org/1999/xhtml", lang="en")
    # start the head
    writer.begintag("head")
    # "http_equiv" is un-mangled to "http-equiv" later, in finishHTML().
    writer.simpletag("meta", http_equiv="Content-Type", content="text/html; charset=utf-8")
    # title
    if title is not None:
        writer.begintag("title")
        writer.write(title)
        writer.endtag("title")
    # write the css
    writer.begintag("style", type="text/css")
    css = defaultCSS
    for before, after in cssReplacements.items():
        css = css.replace(before, after)
    writer.write(css)
    writer.endtag("style")
    # write the javascript
    writer.begintag("script", type="text/javascript")
    javascript = defaultJavascript
    ## hack around some ElementTree escaping
    # (the placeholders are swapped back to "<" / ">" in finishHTML())
    javascript = javascript.replace("<", "l_e_s_s")
    javascript = javascript.replace(">", "g_r_e_a_t_e_r")
    writer.write(javascript)
    writer.endtag("script")
    # close the head
    writer.endtag("head")
    # start the body
    writer.begintag("body")
    # return the writer
    return writer
def finishHTML(writer):
    """Close <body>/<html>, serialize, and undo the ElementTree-escaping placeholders."""
    writer.endtag("body")
    writer.endtag("html")
    text = "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n"
    text += writer.compile()
    # Swap the placeholder tokens (introduced while building the tree) back
    # to the characters ElementTree would otherwise have escaped or rejected.
    for placeholder, actual in (
        ("c_l_a_s_s", "class"),
        ("a_p_o_s_t_r_o_p_h_e", "'"),
        ("l_e_s_s", "<"),
        ("g_r_e_a_t_e_r", ">"),
        ("http_equiv", "http-equiv"),
    ):
        text = text.replace(placeholder, actual)
    return text
# ---------
# File Name
# ---------
def findUniqueFileName(path):
    """Return *path* if it is unused, otherwise a timestamped variant of it."""
    if not os.path.exists(path):
        return path
    directory, base = os.path.split(path)
    stem, extension = os.path.splitext(base)
    stamp = time.strftime("%Y-%m-%d %H-%M-%S %Z")
    candidate = os.path.join(directory, "%s (%s)%s" % (stem, stamp, extension))
    # intentionally break to prevent a file overwrite.
    # this could happen if the user has a directory full
    # of files with future time stamped file names.
    # not likely, but avoid it all the same.
    assert not os.path.exists(candidate)
    return candidate
| {
"repo_name": "typesupply/woffTools",
"path": "Lib/woffTools/tools/support.py",
"copies": "1",
"size": "9396",
"license": "mit",
"hash": 8245489622908094000,
"line_mean": 21.5323741007,
"line_max": 140,
"alpha_frac": 0.6332481907,
"autogenerated": false,
"ratio": 2.845548152634767,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.884468593780604,
"avg_score": 0.026822081105745237,
"num_lines": 417
} |
from fontTools.misc.py23 import *
try:
set
except NameError:
from sets import Set as set
class FeatureTableWriter(object):
    """
    A very simple feature file syntax table writer.
    """

    def __init__(self, name, indentation=" "):
        # The opening line is created immediately; the closing
        # "} name;" line is only appended when write() is called.
        self._name = name
        self._lineSep = "\n"
        self._indentation = indentation
        self._lines = ["table %s {" % name]

    def addLineWithKeyValue(self, key, value):
        """
        Adds a line with form::

            key value;
        """
        self.addLine("%s %s;" % (key, str(value)))

    def addLine(self, line):
        """
        Adds a raw line.
        """
        self._lines.append(self._indentation + line)

    def write(self):
        """
        Returns the text of the table.
        """
        return self._lineSep.join(self._lines + ["} %s;" % self._name])
# --------------
# Text Utilities
# --------------
# The comments were taken from the feature file syntax spec.
def winCharEncode(char):
    """Encode one character for a Windows-platform FDK name string.

    Per the feature file syntax spec, characters above ASCII — plus the
    escape-sensitive backslash, double quote, tab, newline and return —
    are written as a backslash followed by exactly four hex digits.
    """
    special = set("\\\"\t\n\r")
    code = ord(char)
    if code > 128 or char in special:
        return "\\%04X" % code
    return char
def macCharEncode(char):
    """Encode one character for a Mac-platform FDK name string.

    MacRoman-encodable characters in 129-255 — and the escape-sensitive
    backslash, quote, tab, newline, return — become a backslash plus two
    hex digits; characters outside MacRoman at or above U+0100 become a
    backslash plus four hex digits.
    """
    special = set("\\\"\t\n\r")
    try:
        code = ord(char.encode("macroman"))
        if 128 < code < 256 or char in special:
            return "\\%02X" % code
    except UnicodeEncodeError:
        # Not representable in MacRoman; fall through to the Unicode path.
        pass
    code = ord(char)
    if code >= 256:
        return "\\%04X" % code
    return char
def winStr(text):
    """
    Convert string to FDK encoding for Windows.
    """
    return str("".join(map(winCharEncode, unicode(text))))
def macStr(text):
    """
    Convert string to FDK encoding for Mac.
    """
    return str("".join(map(macCharEncode, unicode(text))))
| {
"repo_name": "moyogo/ufo2fdk",
"path": "Lib/ufo2fdk/featureTableWriter.py",
"copies": "1",
"size": "2999",
"license": "mit",
"hash": -3631144616018957300,
"line_mean": 27.2924528302,
"line_max": 66,
"alpha_frac": 0.5805268423,
"autogenerated": false,
"ratio": 3.894805194805195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9975332037105195,
"avg_score": 0,
"num_lines": 106
} |
from fontTools.misc.py23 import Tag, bytechr, byteord, bytesjoin
from io import BytesIO
import sys
import array
import struct
from collections import OrderedDict
from fontTools.misc import sstruct
from fontTools.misc.arrayTools import calcIntBounds
from fontTools.misc.textTools import pad
from fontTools.ttLib import (TTFont, TTLibError, getTableModule, getTableClass,
getSearchRange)
from fontTools.ttLib.sfnt import (SFNTReader, SFNTWriter, DirectoryEntry,
WOFFFlavorData, sfntDirectoryFormat, sfntDirectorySize, SFNTDirectoryEntry,
sfntDirectoryEntrySize, calcChecksum)
from fontTools.ttLib.tables import ttProgram, _g_l_y_f
import logging
log = logging.getLogger("fontTools.ttLib.woff2")
haveBrotli = False
try:
try:
import brotlicffi as brotli
except ImportError:
import brotli
haveBrotli = True
except ImportError:
pass
class WOFF2Reader(SFNTReader):
    """Reads a WOFF2 font file and exposes its (possibly reconstructed) SFNT tables."""

    flavor = "woff2"

    def __init__(self, file, checkChecksums=0, fontNumber=-1):
        if not haveBrotli:
            log.error(
                'The WOFF2 decoder requires the Brotli Python extension, available at: '
                'https://github.com/google/brotli')
            raise ImportError("No module named brotli")

        self.file = file

        signature = Tag(self.file.read(4))
        if signature != b"wOF2":
            raise TTLibError("Not a WOFF2 font (bad signature)")

        self.file.seek(0)
        self.DirectoryEntry = WOFF2DirectoryEntry
        data = self.file.read(woff2DirectorySize)
        if len(data) != woff2DirectorySize:
            raise TTLibError('Not a WOFF2 font (not enough data)')
        sstruct.unpack(woff2DirectoryFormat, data, self)

        self.tables = OrderedDict()
        offset = 0
        for i in range(self.numTables):
            entry = self.DirectoryEntry()
            entry.fromFile(self.file)
            tag = Tag(entry.tag)
            self.tables[tag] = entry
            # Table data offsets are relative to the start of the single
            # decompressed data stream, laid out in directory order.
            entry.offset = offset
            offset += entry.length

        totalUncompressedSize = offset
        compressedData = self.file.read(self.totalCompressedSize)
        decompressedData = brotli.decompress(compressedData)
        if len(decompressedData) != totalUncompressedSize:
            raise TTLibError(
                'unexpected size for decompressed font data: expected %d, found %d'
                % (totalUncompressedSize, len(decompressedData)))
        self.transformBuffer = BytesIO(decompressedData)

        # Sanity-check the 'length' field from the header against the file size.
        self.file.seek(0, 2)
        if self.length != self.file.tell():
            raise TTLibError("reported 'length' doesn't match the actual file size")

        self.flavorData = WOFF2FlavorData(self)

        # make empty TTFont to store data while reconstructing tables
        self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False)

    def __getitem__(self, tag):
        """Fetch the raw table data. Reconstruct transformed tables."""
        entry = self.tables[Tag(tag)]
        if not hasattr(entry, 'data'):
            # Lazily materialize and cache the table bytes on the entry.
            if entry.transformed:
                entry.data = self.reconstructTable(tag)
            else:
                entry.data = entry.loadData(self.transformBuffer)
        return entry.data

    def reconstructTable(self, tag):
        """Reconstruct table named 'tag' from transformed data."""
        entry = self.tables[Tag(tag)]
        rawData = entry.loadData(self.transformBuffer)
        if tag == 'glyf':
            # no need to pad glyph data when reconstructing
            padding = self.padding if hasattr(self, 'padding') else None
            data = self._reconstructGlyf(rawData, padding)
        elif tag == 'loca':
            data = self._reconstructLoca()
        elif tag == 'hmtx':
            data = self._reconstructHmtx(rawData)
        else:
            raise TTLibError("transform for table '%s' is unknown" % tag)
        return data

    def _reconstructGlyf(self, data, padding=None):
        """ Return reconstructed glyf table data, and set the corresponding loca's
        locations. Optionally pad glyph offsets to the specified number of bytes.
        """
        self.ttFont['loca'] = WOFF2LocaTable()
        glyfTable = self.ttFont['glyf'] = WOFF2GlyfTable()
        glyfTable.reconstruct(data, self.ttFont)
        if padding:
            glyfTable.padding = padding
        data = glyfTable.compile(self.ttFont)
        return data

    def _reconstructLoca(self):
        """ Return reconstructed loca table data. """
        if 'loca' not in self.ttFont:
            # make sure glyf is reconstructed first
            self.tables['glyf'].data = self.reconstructTable('glyf')
        locaTable = self.ttFont['loca']
        data = locaTable.compile(self.ttFont)
        if len(data) != self.tables['loca'].origLength:
            raise TTLibError(
                "reconstructed 'loca' table doesn't match original size: "
                "expected %d, found %d"
                % (self.tables['loca'].origLength, len(data)))
        return data

    def _reconstructHmtx(self, data):
        """ Return reconstructed hmtx table data. """
        # Before reconstructing 'hmtx' table we need to parse other tables:
        # 'glyf' is required for reconstructing the sidebearings from the glyphs'
        # bounding box; 'hhea' is needed for the numberOfHMetrics field.
        if "glyf" in self.flavorData.transformedTables:
            # transformed 'glyf' table is self-contained, thus 'loca' not needed
            tableDependencies = ("maxp", "hhea", "glyf")
        else:
            # decompiling untransformed 'glyf' requires 'loca', which requires 'head'
            tableDependencies = ("maxp", "head", "hhea", "loca", "glyf")
        for tag in tableDependencies:
            self._decompileTable(tag)
        hmtxTable = self.ttFont["hmtx"] = WOFF2HmtxTable()
        hmtxTable.reconstruct(data, self.ttFont)
        data = hmtxTable.compile(self.ttFont)
        return data

    def _decompileTable(self, tag):
        """Decompile table data and store it inside self.ttFont."""
        data = self[tag]
        if self.ttFont.isLoaded(tag):
            return self.ttFont[tag]
        tableClass = getTableClass(tag)
        table = tableClass(tag)
        self.ttFont.tables[tag] = table
        table.decompile(data, self.ttFont)
class WOFF2Writer(SFNTWriter):
flavor = "woff2"
def __init__(self, file, numTables, sfntVersion="\000\001\000\000",
flavor=None, flavorData=None):
if not haveBrotli:
log.error(
'The WOFF2 encoder requires the Brotli Python extension, available at: '
'https://github.com/google/brotli')
raise ImportError("No module named brotli")
self.file = file
self.numTables = numTables
self.sfntVersion = Tag(sfntVersion)
self.flavorData = WOFF2FlavorData(data=flavorData)
self.directoryFormat = woff2DirectoryFormat
self.directorySize = woff2DirectorySize
self.DirectoryEntry = WOFF2DirectoryEntry
self.signature = Tag("wOF2")
self.nextTableOffset = 0
self.transformBuffer = BytesIO()
self.tables = OrderedDict()
# make empty TTFont to store data while normalising and transforming tables
self.ttFont = TTFont(recalcBBoxes=False, recalcTimestamp=False)
def __setitem__(self, tag, data):
"""Associate new entry named 'tag' with raw table data."""
if tag in self.tables:
raise TTLibError("cannot rewrite '%s' table" % tag)
if tag == 'DSIG':
# always drop DSIG table, since the encoding process can invalidate it
self.numTables -= 1
return
entry = self.DirectoryEntry()
entry.tag = Tag(tag)
entry.flags = getKnownTagIndex(entry.tag)
# WOFF2 table data are written to disk only on close(), after all tags
# have been specified
entry.data = data
self.tables[tag] = entry
def close(self):
""" All tags must have been specified. Now write the table data and directory.
"""
if len(self.tables) != self.numTables:
raise TTLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(self.tables)))
if self.sfntVersion in ("\x00\x01\x00\x00", "true"):
isTrueType = True
elif self.sfntVersion == "OTTO":
isTrueType = False
else:
raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)")
# The WOFF2 spec no longer requires the glyph offsets to be 4-byte aligned.
# However, the reference WOFF2 implementation still fails to reconstruct
# 'unpadded' glyf tables, therefore we need to 'normalise' them.
# See:
# https://github.com/khaledhosny/ots/issues/60
# https://github.com/google/woff2/issues/15
if (
isTrueType
and "glyf" in self.flavorData.transformedTables
and "glyf" in self.tables
):
self._normaliseGlyfAndLoca(padding=4)
self._setHeadTransformFlag()
# To pass the legacy OpenType Sanitiser currently included in browsers,
# we must sort the table directory and data alphabetically by tag.
# See:
# https://github.com/google/woff2/pull/3
# https://lists.w3.org/Archives/Public/public-webfonts-wg/2015Mar/0000.html
# TODO(user): remove to match spec once browsers are on newer OTS
self.tables = OrderedDict(sorted(self.tables.items()))
self.totalSfntSize = self._calcSFNTChecksumsLengthsAndOffsets()
fontData = self._transformTables()
compressedFont = brotli.compress(fontData, mode=brotli.MODE_FONT)
self.totalCompressedSize = len(compressedFont)
self.length = self._calcTotalSize()
self.majorVersion, self.minorVersion = self._getVersion()
self.reserved = 0
directory = self._packTableDirectory()
self.file.seek(0)
self.file.write(pad(directory + compressedFont, size=4))
self._writeFlavorData()
def _normaliseGlyfAndLoca(self, padding=4):
""" Recompile glyf and loca tables, aligning glyph offsets to multiples of
'padding' size. Update the head table's 'indexToLocFormat' accordingly while
compiling loca.
"""
if self.sfntVersion == "OTTO":
return
for tag in ('maxp', 'head', 'loca', 'glyf'):
self._decompileTable(tag)
self.ttFont['glyf'].padding = padding
for tag in ('glyf', 'loca'):
self._compileTable(tag)
def _setHeadTransformFlag(self):
""" Set bit 11 of 'head' table flags to indicate that the font has undergone
a lossless modifying transform. Re-compile head table data."""
self._decompileTable('head')
self.ttFont['head'].flags |= (1 << 11)
self._compileTable('head')
def _decompileTable(self, tag):
""" Fetch table data, decompile it, and store it inside self.ttFont. """
tag = Tag(tag)
if tag not in self.tables:
raise TTLibError("missing required table: %s" % tag)
if self.ttFont.isLoaded(tag):
return
data = self.tables[tag].data
if tag == 'loca':
tableClass = WOFF2LocaTable
elif tag == 'glyf':
tableClass = WOFF2GlyfTable
elif tag == 'hmtx':
tableClass = WOFF2HmtxTable
else:
tableClass = getTableClass(tag)
table = tableClass(tag)
self.ttFont.tables[tag] = table
table.decompile(data, self.ttFont)
def _compileTable(self, tag):
""" Compile table and store it in its 'data' attribute. """
self.tables[tag].data = self.ttFont[tag].compile(self.ttFont)
def _calcSFNTChecksumsLengthsAndOffsets(self):
""" Compute the 'original' SFNT checksums, lengths and offsets for checksum
adjustment calculation. Return the total size of the uncompressed font.
"""
offset = sfntDirectorySize + sfntDirectoryEntrySize * len(self.tables)
for tag, entry in self.tables.items():
data = entry.data
entry.origOffset = offset
entry.origLength = len(data)
if tag == 'head':
entry.checkSum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:])
else:
entry.checkSum = calcChecksum(data)
offset += (entry.origLength + 3) & ~3
return offset
def _transformTables(self):
"""Return transformed font data."""
transformedTables = self.flavorData.transformedTables
for tag, entry in self.tables.items():
data = None
if tag in transformedTables:
data = self.transformTable(tag)
if data is not None:
entry.transformed = True
if data is None:
# pass-through the table data without transformation
data = entry.data
entry.transformed = False
entry.offset = self.nextTableOffset
entry.saveData(self.transformBuffer, data)
self.nextTableOffset += entry.length
self.writeMasterChecksum()
fontData = self.transformBuffer.getvalue()
return fontData
def transformTable(self, tag):
"""Return transformed table data, or None if some pre-conditions aren't
met -- in which case, the non-transformed table data will be used.
"""
if tag == "loca":
data = b""
elif tag == "glyf":
for tag in ('maxp', 'head', 'loca', 'glyf'):
self._decompileTable(tag)
glyfTable = self.ttFont['glyf']
data = glyfTable.transform(self.ttFont)
elif tag == "hmtx":
if "glyf" not in self.tables:
return
for tag in ("maxp", "head", "hhea", "loca", "glyf", "hmtx"):
self._decompileTable(tag)
hmtxTable = self.ttFont["hmtx"]
data = hmtxTable.transform(self.ttFont) # can be None
else:
raise TTLibError("Transform for table '%s' is unknown" % tag)
return data
def _calcMasterChecksum(self):
"""Calculate checkSumAdjustment."""
tags = list(self.tables.keys())
checksums = []
for i in range(len(tags)):
checksums.append(self.tables[tags[i]].checkSum)
# Create a SFNT directory for checksum calculation purposes
self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(self.numTables, 16)
directory = sstruct.pack(sfntDirectoryFormat, self)
tables = sorted(self.tables.items())
for tag, entry in tables:
sfntEntry = SFNTDirectoryEntry()
sfntEntry.tag = entry.tag
sfntEntry.checkSum = entry.checkSum
sfntEntry.offset = entry.origOffset
sfntEntry.length = entry.origLength
directory = directory + sfntEntry.toString()
directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize
assert directory_end == len(directory)
checksums.append(calcChecksum(directory))
checksum = sum(checksums) & 0xffffffff
# BiboAfba!
checksumadjustment = (0xB1B0AFBA - checksum) & 0xffffffff
return checksumadjustment
def writeMasterChecksum(self):
"""Write checkSumAdjustment to the transformBuffer."""
checksumadjustment = self._calcMasterChecksum()
self.transformBuffer.seek(self.tables['head'].offset + 8)
self.transformBuffer.write(struct.pack(">L", checksumadjustment))
def _calcTotalSize(self):
"""Calculate total size of WOFF2 font, including any meta- and/or private data."""
offset = self.directorySize
for entry in self.tables.values():
offset += len(entry.toString())
offset += self.totalCompressedSize
offset = (offset + 3) & ~3
offset = self._calcFlavorDataOffsetsAndSize(offset)
return offset
def _calcFlavorDataOffsetsAndSize(self, start):
"""Calculate offsets and lengths for any meta- and/or private data."""
offset = start
data = self.flavorData
if data.metaData:
self.metaOrigLength = len(data.metaData)
self.metaOffset = offset
self.compressedMetaData = brotli.compress(
data.metaData, mode=brotli.MODE_TEXT)
self.metaLength = len(self.compressedMetaData)
offset += self.metaLength
else:
self.metaOffset = self.metaLength = self.metaOrigLength = 0
self.compressedMetaData = b""
if data.privData:
# make sure private data is padded to 4-byte boundary
offset = (offset + 3) & ~3
self.privOffset = offset
self.privLength = len(data.privData)
offset += self.privLength
else:
self.privOffset = self.privLength = 0
return offset
def _getVersion(self):
"""Return the WOFF2 font's (majorVersion, minorVersion) tuple."""
data = self.flavorData
if data.majorVersion is not None and data.minorVersion is not None:
return data.majorVersion, data.minorVersion
else:
# if None, return 'fontRevision' from 'head' table
if 'head' in self.tables:
return struct.unpack(">HH", self.tables['head'].data[4:8])
else:
return 0, 0
def _packTableDirectory(self):
"""Return WOFF2 table directory data."""
directory = sstruct.pack(self.directoryFormat, self)
for entry in self.tables.values():
directory = directory + entry.toString()
return directory
def _writeFlavorData(self):
"""Write metadata and/or private data using appropiate padding."""
compressedMetaData = self.compressedMetaData
privData = self.flavorData.privData
if compressedMetaData and privData:
compressedMetaData = pad(compressedMetaData, size=4)
if compressedMetaData:
self.file.seek(self.metaOffset)
assert self.file.tell() == self.metaOffset
self.file.write(compressedMetaData)
if privData:
self.file.seek(self.privOffset)
assert self.file.tell() == self.privOffset
self.file.write(privData)
def reordersTables(self):
return True
# -- woff2 directory helpers and cruft
woff2DirectoryFormat = """
> # big endian
signature: 4s # "wOF2"
sfntVersion: 4s
length: L # total woff2 file size
numTables: H # number of tables
reserved: H # set to 0
totalSfntSize: L # uncompressed size
totalCompressedSize: L # compressed size
majorVersion: H # major version of WOFF file
minorVersion: H # minor version of WOFF file
metaOffset: L # offset to metadata block
metaLength: L # length of compressed metadata
metaOrigLength: L # length of uncompressed metadata
privOffset: L # offset to private data block
privLength: L # length of private data block
"""
woff2DirectorySize = sstruct.calcsize(woff2DirectoryFormat)
woff2KnownTags = (
"cmap", "head", "hhea", "hmtx", "maxp", "name", "OS/2", "post", "cvt ",
"fpgm", "glyf", "loca", "prep", "CFF ", "VORG", "EBDT", "EBLC", "gasp",
"hdmx", "kern", "LTSH", "PCLT", "VDMX", "vhea", "vmtx", "BASE", "GDEF",
"GPOS", "GSUB", "EBSC", "JSTF", "MATH", "CBDT", "CBLC", "COLR", "CPAL",
"SVG ", "sbix", "acnt", "avar", "bdat", "bloc", "bsln", "cvar", "fdsc",
"feat", "fmtx", "fvar", "gvar", "hsty", "just", "lcar", "mort", "morx",
"opbd", "prop", "trak", "Zapf", "Silf", "Glat", "Gloc", "Feat", "Sill")
woff2FlagsFormat = """
> # big endian
flags: B # table type and flags
"""
woff2FlagsSize = sstruct.calcsize(woff2FlagsFormat)
woff2UnknownTagFormat = """
> # big endian
tag: 4s # 4-byte tag (optional)
"""
woff2UnknownTagSize = sstruct.calcsize(woff2UnknownTagFormat)
woff2UnknownTagIndex = 0x3F
woff2Base128MaxSize = 5
woff2DirectoryEntryMaxSize = woff2FlagsSize + woff2UnknownTagSize + 2 * woff2Base128MaxSize
woff2TransformedTableTags = ('glyf', 'loca')
woff2GlyfTableFormat = """
> # big endian
version: L # = 0x00000000
numGlyphs: H # Number of glyphs
indexFormat: H # Offset format for loca table
nContourStreamSize: L # Size of nContour stream
nPointsStreamSize: L # Size of nPoints stream
flagStreamSize: L # Size of flag stream
glyphStreamSize: L # Size of glyph stream
compositeStreamSize: L # Size of composite stream
bboxStreamSize: L # Comnined size of bboxBitmap and bboxStream
instructionStreamSize: L # Size of instruction stream
"""
woff2GlyfTableFormatSize = sstruct.calcsize(woff2GlyfTableFormat)
bboxFormat = """
> # big endian
xMin: h
yMin: h
xMax: h
yMax: h
"""
def getKnownTagIndex(tag):
	"""Return index of 'tag' in woff2KnownTags list. Return 63 if not found.

	Args:
		tag: a 4-character table tag string.

	Returns:
		int: the position of the tag in the WOFF2 'Known Tags' table, or
		woff2UnknownTagIndex (63) when the tag is not a known one.
	"""
	# idiomatic EAFP lookup instead of a manual index scan
	try:
		return woff2KnownTags.index(tag)
	except ValueError:
		return woff2UnknownTagIndex
class WOFF2DirectoryEntry(DirectoryEntry):
	"""One entry of the WOFF2 table directory.

	Unlike SFNT directory entries, WOFF2 entries carry no explicit offset or
	checksum: the tag may be encoded as an index into the 'Known Tags' table,
	and lengths use the variable-length UIntBase128 encoding.
	"""

	def fromFile(self, file):
		"""Read one directory entry from 'file', leaving the file positioned
		immediately after the bytes actually consumed."""
		pos = file.tell()
		# over-read up to the maximum possible entry size, then seek back
		data = file.read(woff2DirectoryEntryMaxSize)
		left = self.fromString(data)
		consumed = len(data) - len(left)
		file.seek(pos + consumed)

	def fromString(self, data):
		"""Parse one directory entry from 'data'; return the leftover bytes.

		Sets self.flags, self.tag, self.origLength and self.length.
		Raises TTLibError on truncated input or an invalid 'loca' entry.
		"""
		if len(data) < 1:
			raise TTLibError("can't read table 'flags': not enough data")
		dummy, data = sstruct.unpack2(woff2FlagsFormat, data, self)
		if self.flags & 0x3F == 0x3F:
			# if bits [0..5] of the flags byte == 63, read a 4-byte arbitrary tag value
			if len(data) < woff2UnknownTagSize:
				raise TTLibError("can't read table 'tag': not enough data")
			dummy, data = sstruct.unpack2(woff2UnknownTagFormat, data, self)
		else:
			# otherwise, tag is derived from a fixed 'Known Tags' table
			self.tag = woff2KnownTags[self.flags & 0x3F]
		self.tag = Tag(self.tag)
		# original (untransformed) length, UIntBase128-encoded
		self.origLength, data = unpackBase128(data)
		self.length = self.origLength
		if self.transformed:
			# transformed entries carry a second length: the transformed size
			self.length, data = unpackBase128(data)
			if self.tag == 'loca' and self.length != 0:
				raise TTLibError(
					"the transformLength of the 'loca' table must be 0")
		# return left over data
		return data

	def toString(self):
		"""Serialize this entry back to bytes (inverse of fromString)."""
		data = bytechr(self.flags)
		if (self.flags & 0x3F) == 0x3F:
			# arbitrary tag: stored explicitly after the flags byte
			data += struct.pack('>4s', self.tag.tobytes())
		data += packBase128(self.origLength)
		if self.transformed:
			data += packBase128(self.length)
		return data

	@property
	def transformVersion(self):
		"""Return bits 6-7 of table entry's flags, which indicate the preprocessing
		transformation version number (between 0 and 3).
		"""
		return self.flags >> 6

	@transformVersion.setter
	def transformVersion(self, value):
		assert 0 <= value <= 3
		# NOTE(review): ORs into bits 6-7; assumes those bits are still clear
		self.flags |= value << 6

	@property
	def transformed(self):
		"""Return True if the table has any transformation, else return False."""
		# For all tables in a font, except for 'glyf' and 'loca', the transformation
		# version 0 indicates the null transform (where the original table data is
		# passed directly to the Brotli compressor). For 'glyf' and 'loca' tables,
		# transformation version 3 indicates the null transform
		if self.tag in {"glyf", "loca"}:
			return self.transformVersion != 3
		else:
			return self.transformVersion != 0

	@transformed.setter
	def transformed(self, booleanValue):
		# here we assume that a non-null transform means version 0 for 'glyf' and
		# 'loca' and 1 for every other table (e.g. hmtx); but that may change as
		# new transformation formats are introduced in the future (if ever).
		if self.tag in {"glyf", "loca"}:
			self.transformVersion = 3 if not booleanValue else 0
		else:
			self.transformVersion = int(booleanValue)
class WOFF2LocaTable(getTableClass('loca')):
	"""Same as parent class. The only difference is that it attempts to preserve
	the 'indexFormat' as encoded in the WOFF2 glyf table.
	"""

	def __init__(self, tag=None):
		self.tableTag = Tag(tag or 'loca')

	def compile(self, ttFont):
		"""Compile loca offsets, honouring the WOFF2 glyf 'indexFormat' when
		present so a decompile/compile round-trip keeps the same loca format."""
		try:
			max_location = max(self.locations)
		except AttributeError:
			# no locations set yet: treat as an empty loca table
			self.set([])
			max_location = 0
		if 'glyf' in ttFont and hasattr(ttFont['glyf'], 'indexFormat'):
			# compile loca using the indexFormat specified in the WOFF2 glyf table
			indexFormat = ttFont['glyf'].indexFormat
			if indexFormat == 0:
				# short format: offsets halved and stored as uint16
				if max_location >= 0x20000:
					raise TTLibError("indexFormat is 0 but local offsets > 0x20000")
				if not all(l % 2 == 0 for l in self.locations):
					raise TTLibError("indexFormat is 0 but local offsets not multiples of 2")
				locations = array.array("H")
				for i in range(len(self.locations)):
					locations.append(self.locations[i] // 2)
			else:
				# long format: offsets stored verbatim as uint32
				locations = array.array("I", self.locations)
			if sys.byteorder != "big": locations.byteswap()
			data = locations.tobytes()
		else:
			# use the most compact indexFormat given the current glyph offsets
			data = super(WOFF2LocaTable, self).compile(ttFont)
		return data
class WOFF2GlyfTable(getTableClass('glyf')):
	"""Decoder/Encoder for WOFF2 'glyf' table transform."""

	# the transformed table splits glyph data across these seven parallel
	# byte streams, stored in this order after the fixed-size header
	subStreams = (
		'nContourStream', 'nPointsStream', 'flagStream', 'glyphStream',
		'compositeStream', 'bboxStream', 'instructionStream')

	def __init__(self, tag=None):
		self.tableTag = Tag(tag or 'glyf')

	def reconstruct(self, data, ttFont):
		""" Decompile transformed 'glyf' data. """
		inputDataSize = len(data)

		if inputDataSize < woff2GlyfTableFormatSize:
			raise TTLibError("not enough 'glyf' data")
		# parse the fixed-size header: glyph count, loca format, stream sizes
		dummy, data = sstruct.unpack2(woff2GlyfTableFormat, data, self)
		offset = woff2GlyfTableFormatSize

		# slice the remaining data into the seven sub-streams using the
		# sizes read from the header
		for stream in self.subStreams:
			size = getattr(self, stream + 'Size')
			setattr(self, stream, data[:size])
			data = data[size:]
			offset += size

		if offset != inputDataSize:
			raise TTLibError(
				"incorrect size of transformed 'glyf' table: expected %d, received %d bytes"
				% (offset, inputDataSize))

		# the bbox stream begins with a bitmap (one bit per glyph, rounded up
		# to a 4-byte multiple) flagging glyphs with explicit bbox records
		bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2
		bboxBitmap = self.bboxStream[:bboxBitmapSize]
		self.bboxBitmap = array.array('B', bboxBitmap)
		self.bboxStream = self.bboxStream[bboxBitmapSize:]

		# one big-endian int16 contour count per glyph
		self.nContourStream = array.array("h", self.nContourStream)
		if sys.byteorder != "big": self.nContourStream.byteswap()
		assert len(self.nContourStream) == self.numGlyphs

		# propagate the loca offset format so 'loca' compiles consistently
		if 'head' in ttFont:
			ttFont['head'].indexToLocFormat = self.indexFormat
		try:
			self.glyphOrder = ttFont.getGlyphOrder()
		except:
			# NOTE(review): bare except deliberately falls back to a
			# synthetic glyph order when none can be obtained
			self.glyphOrder = None
		if self.glyphOrder is None:
			# fabricate "glyphNNNNN" names, with .notdef as glyph 0
			self.glyphOrder = [".notdef"]
			self.glyphOrder.extend(["glyph%.5d" % i for i in range(1, self.numGlyphs)])
		else:
			if len(self.glyphOrder) != self.numGlyphs:
				raise TTLibError(
					"incorrect glyphOrder: expected %d glyphs, found %d" %
					(len(self.glyphOrder), self.numGlyphs))

		glyphs = self.glyphs = {}
		for glyphID, glyphName in enumerate(self.glyphOrder):
			glyph = self._decodeGlyph(glyphID)
			glyphs[glyphName] = glyph

	def transform(self, ttFont):
		""" Return transformed 'glyf' data """
		self.numGlyphs = len(self.glyphs)
		assert len(self.glyphOrder) == self.numGlyphs
		if 'maxp' in ttFont:
			ttFont['maxp'].numGlyphs = self.numGlyphs
		self.indexFormat = ttFont['head'].indexToLocFormat

		# start with empty sub-streams and an all-zero bbox bitmap
		for stream in self.subStreams:
			setattr(self, stream, b"")
		bboxBitmapSize = ((self.numGlyphs + 31) >> 5) << 2
		self.bboxBitmap = array.array('B', [0]*bboxBitmapSize)

		# encode each glyph into the streams, in glyph-order
		for glyphID in range(self.numGlyphs):
			self._encodeGlyph(glyphID)

		# the bitmap is prepended to the bbox values
		self.bboxStream = self.bboxBitmap.tobytes() + self.bboxStream
		for stream in self.subStreams:
			setattr(self, stream + 'Size', len(getattr(self, stream)))
		self.version = 0
		data = sstruct.pack(woff2GlyfTableFormat, self)
		data += bytesjoin([getattr(self, s) for s in self.subStreams])
		return data

	def _decodeGlyph(self, glyphID):
		# Decode one glyph (empty, composite or simple) from the streams.
		glyph = getTableModule('glyf').Glyph()
		glyph.numberOfContours = self.nContourStream[glyphID]
		if glyph.numberOfContours == 0:
			# empty glyph: nothing else to read
			return glyph
		elif glyph.isComposite():
			self._decodeComponents(glyph)
		else:
			self._decodeCoordinates(glyph)
		self._decodeBBox(glyphID, glyph)
		return glyph

	def _decodeComponents(self, glyph):
		# Consume component records from the composite stream until the
		# MORE_COMPONENTS flag is no longer set.
		data = self.compositeStream
		glyph.components = []
		more = 1
		haveInstructions = 0
		while more:
			component = getTableModule('glyf').GlyphComponent()
			more, haveInstr, data = component.decompile(data, self)
			haveInstructions = haveInstructions | haveInstr
			glyph.components.append(component)
		self.compositeStream = data
		if haveInstructions:
			self._decodeInstructions(glyph)

	def _decodeCoordinates(self, glyph):
		# Read per-contour point counts (255UInt16-encoded), then points.
		data = self.nPointsStream
		endPtsOfContours = []
		endPoint = -1
		for i in range(glyph.numberOfContours):
			ptsOfContour, data = unpack255UShort(data)
			endPoint += ptsOfContour
			endPtsOfContours.append(endPoint)
		glyph.endPtsOfContours = endPtsOfContours
		self.nPointsStream = data
		self._decodeTriplets(glyph)
		self._decodeInstructions(glyph)

	def _decodeInstructions(self, glyph):
		# Instruction length comes from the glyph stream; the bytecode
		# itself comes from the shared instruction stream.
		glyphStream = self.glyphStream
		instructionStream = self.instructionStream
		instructionLength, glyphStream = unpack255UShort(glyphStream)
		glyph.program = ttProgram.Program()
		glyph.program.fromBytecode(instructionStream[:instructionLength])
		self.glyphStream = glyphStream
		self.instructionStream = instructionStream[instructionLength:]

	def _decodeBBox(self, glyphID, glyph):
		# A set bit in the bitmap means the glyph has an explicit bbox record.
		haveBBox = bool(self.bboxBitmap[glyphID >> 3] & (0x80 >> (glyphID & 7)))
		if glyph.isComposite() and not haveBBox:
			# composites must always carry an explicit bbox
			raise TTLibError('no bbox values for composite glyph %d' % glyphID)
		if haveBBox:
			dummy, self.bboxStream = sstruct.unpack2(bboxFormat, self.bboxStream, glyph)
		else:
			# simple glyphs without a record get their bbox recomputed
			glyph.recalcBounds(self)

	def _decodeTriplets(self, glyph):
		# Decode the WOFF2 "triplet" point encoding: one flag byte per point
		# (from flagStream) selects how many bytes of glyphStream encode the
		# point's (dx, dy) delta and the signs of each coordinate.

		def withSign(flag, baseval):
			# bit 0 of 'flag' selects the sign of the decoded magnitude
			assert 0 <= baseval and baseval < 65536, 'integer overflow'
			return baseval if flag & 1 else -baseval

		nPoints = glyph.endPtsOfContours[-1] + 1
		flagSize = nPoints
		if flagSize > len(self.flagStream):
			raise TTLibError("not enough 'flagStream' data")
		flagsData = self.flagStream[:flagSize]
		self.flagStream = self.flagStream[flagSize:]
		flags = array.array('B', flagsData)

		triplets = array.array('B', self.glyphStream)
		nTriplets = len(triplets)
		assert nPoints <= nTriplets

		x = 0
		y = 0
		glyph.coordinates = getTableModule('glyf').GlyphCoordinates.zeros(nPoints)
		glyph.flags = array.array("B")
		tripletIndex = 0
		for i in range(nPoints):
			flag = flags[i]
			# high bit of the flag byte marks an off-curve point
			onCurve = not bool(flag >> 7)
			flag &= 0x7f
			# flag value selects the byte count of this point's delta data
			if flag < 84:
				nBytes = 1
			elif flag < 120:
				nBytes = 2
			elif flag < 124:
				nBytes = 3
			else:
				nBytes = 4
			assert ((tripletIndex + nBytes) <= nTriplets)
			if flag < 10:
				# x delta is zero; y delta in one byte plus flag bits
				dx = 0
				dy = withSign(flag, ((flag & 14) << 7) + triplets[tripletIndex])
			elif flag < 20:
				# y delta is zero; x delta in one byte plus flag bits
				dx = withSign(flag, (((flag - 10) & 14) << 7) + triplets[tripletIndex])
				dy = 0
			elif flag < 84:
				# both deltas small: packed into the flag byte and one byte
				b0 = flag - 20
				b1 = triplets[tripletIndex]
				dx = withSign(flag, 1 + (b0 & 0x30) + (b1 >> 4))
				dy = withSign(flag >> 1, 1 + ((b0 & 0x0c) << 2) + (b1 & 0x0f))
			elif flag < 120:
				# one byte per delta, high bits from the flag byte
				b0 = flag - 84
				dx = withSign(flag, 1 + ((b0 // 12) << 8) + triplets[tripletIndex])
				dy = withSign(flag >> 1,
					1 + (((b0 % 12) >> 2) << 8) + triplets[tripletIndex + 1])
			elif flag < 124:
				# 12 bits per delta packed into three bytes
				b2 = triplets[tripletIndex + 1]
				dx = withSign(flag, (triplets[tripletIndex] << 4) + (b2 >> 4))
				dy = withSign(flag >> 1,
					((b2 & 0x0f) << 8) + triplets[tripletIndex + 2])
			else:
				# full 16 bits per delta in four bytes
				dx = withSign(flag,
					(triplets[tripletIndex] << 8) + triplets[tripletIndex + 1])
				dy = withSign(flag >> 1,
					(triplets[tripletIndex + 2] << 8) + triplets[tripletIndex + 3])
			tripletIndex += nBytes
			# deltas accumulate into absolute coordinates
			x += dx
			y += dy
			glyph.coordinates[i] = (x, y)
			glyph.flags.append(int(onCurve))
		bytesConsumed = tripletIndex
		self.glyphStream = self.glyphStream[bytesConsumed:]

	def _encodeGlyph(self, glyphID):
		# Encode one glyph into the sub-streams (inverse of _decodeGlyph).
		glyphName = self.getGlyphName(glyphID)
		glyph = self[glyphName]
		self.nContourStream += struct.pack(">h", glyph.numberOfContours)
		if glyph.numberOfContours == 0:
			return
		elif glyph.isComposite():
			self._encodeComponents(glyph)
		else:
			self._encodeCoordinates(glyph)
		self._encodeBBox(glyphID, glyph)

	def _encodeComponents(self, glyph):
		# Serialize component records; the last one carries the
		# "has instructions" information.
		lastcomponent = len(glyph.components) - 1
		more = 1
		haveInstructions = 0
		for i in range(len(glyph.components)):
			if i == lastcomponent:
				haveInstructions = hasattr(glyph, "program")
				more = 0
			component = glyph.components[i]
			self.compositeStream += component.compile(more, haveInstructions, self)
		if haveInstructions:
			self._encodeInstructions(glyph)

	def _encodeCoordinates(self, glyph):
		# Emit per-contour point counts, then the triplet-encoded points.
		lastEndPoint = -1
		for endPoint in glyph.endPtsOfContours:
			ptsOfContour = endPoint - lastEndPoint
			self.nPointsStream += pack255UShort(ptsOfContour)
			lastEndPoint = endPoint
		self._encodeTriplets(glyph)
		self._encodeInstructions(glyph)

	def _encodeInstructions(self, glyph):
		# Length goes to the glyph stream, bytecode to the instruction stream.
		instructions = glyph.program.getBytecode()
		self.glyphStream += pack255UShort(len(instructions))
		self.instructionStream += instructions

	def _encodeBBox(self, glyphID, glyph):
		assert glyph.numberOfContours != 0, "empty glyph has no bbox"
		if not glyph.isComposite():
			# for simple glyphs, compare the encoded bounding box info with the calculated
			# values, and if they match omit the bounding box info
			currentBBox = glyph.xMin, glyph.yMin, glyph.xMax, glyph.yMax
			calculatedBBox = calcIntBounds(glyph.coordinates)
			if currentBBox == calculatedBBox:
				return
		# mark this glyph in the bitmap and append its explicit bbox record
		self.bboxBitmap[glyphID >> 3] |= 0x80 >> (glyphID & 7)
		self.bboxStream += sstruct.pack(bboxFormat, glyph)

	def _encodeTriplets(self, glyph):
		# Encode point deltas using the shortest applicable WOFF2 triplet
		# form (inverse of _decodeTriplets).
		assert len(glyph.coordinates) == len(glyph.flags)
		coordinates = glyph.coordinates.copy()
		coordinates.absoluteToRelative()

		flags = array.array('B')
		triplets = array.array('B')
		for i in range(len(coordinates)):
			onCurve = glyph.flags[i] & _g_l_y_f.flagOnCurve
			x, y = coordinates[i]
			absX = abs(x)
			absY = abs(y)
			onCurveBit = 0 if onCurve else 128
			xSignBit = 0 if (x < 0) else 1
			ySignBit = 0 if (y < 0) else 1
			xySignBits = xSignBit + 2 * ySignBit

			if x == 0 and absY < 1280:
				# x delta zero: y in one byte plus flag bits
				flags.append(onCurveBit + ((absY & 0xf00) >> 7) + ySignBit)
				triplets.append(absY & 0xff)
			elif y == 0 and absX < 1280:
				# y delta zero: x in one byte plus flag bits
				flags.append(onCurveBit + 10 + ((absX & 0xf00) >> 7) + xSignBit)
				triplets.append(absX & 0xff)
			elif absX < 65 and absY < 65:
				# both deltas packed into the flag byte plus one byte
				flags.append(onCurveBit + 20 + ((absX - 1) & 0x30) + (((absY - 1) & 0x30) >> 2) + xySignBits)
				triplets.append((((absX - 1) & 0xf) << 4) | ((absY - 1) & 0xf))
			elif absX < 769 and absY < 769:
				# one byte per delta, high bits in the flag byte
				flags.append(onCurveBit + 84 + 12 * (((absX - 1) & 0x300) >> 8) + (((absY - 1) & 0x300) >> 6) + xySignBits)
				triplets.append((absX - 1) & 0xff)
				triplets.append((absY - 1) & 0xff)
			elif absX < 4096 and absY < 4096:
				# 12 bits per delta across three bytes
				flags.append(onCurveBit + 120 + xySignBits)
				triplets.append(absX >> 4)
				triplets.append(((absX & 0xf) << 4) | (absY >> 8))
				triplets.append(absY & 0xff)
			else:
				# full 16 bits per delta across four bytes
				flags.append(onCurveBit + 124 + xySignBits)
				triplets.append(absX >> 8)
				triplets.append(absX & 0xff)
				triplets.append(absY >> 8)
				triplets.append(absY & 0xff)

		self.flagStream += flags.tobytes()
		self.glyphStream += triplets.tobytes()
class WOFF2HmtxTable(getTableClass("hmtx")):
	"""Decoder/Encoder for the optional WOFF2 'hmtx' table transform.

	The transform omits lsb values that can be re-derived from each glyph's
	xMin in the 'glyf' table; bits 0/1 of a leading flags byte record which
	of the two lsb arrays were omitted.
	"""

	def __init__(self, tag=None):
		self.tableTag = Tag(tag or 'hmtx')

	def reconstruct(self, data, ttFont):
		# Rebuild self.metrics from transformed data. Requires 'glyf' and
		# 'hhea' to be available in ttFont.
		flags, = struct.unpack(">B", data[:1])
		data = data[1:]
		if flags & 0b11111100 != 0:
			raise TTLibError("Bits 2-7 of '%s' flags are reserved" % self.tableTag)

		# When bit 0 is _not_ set, the lsb[] array is present
		hasLsbArray = flags & 1 == 0
		# When bit 1 is _not_ set, the leftSideBearing[] array is present
		hasLeftSideBearingArray = flags & 2 == 0
		if hasLsbArray and hasLeftSideBearingArray:
			# at least one array must have been omitted for the transform
			# to have been applied at all
			raise TTLibError(
				"either bits 0 or 1 (or both) must set in transformed '%s' flags"
				% self.tableTag
			)

		glyfTable = ttFont["glyf"]
		headerTable = ttFont["hhea"]
		glyphOrder = glyfTable.glyphOrder
		numGlyphs = len(glyphOrder)
		numberOfHMetrics = min(int(headerTable.numberOfHMetrics), numGlyphs)

		# advance widths are always stored: one uint16 per hMetric
		assert len(data) >= 2 * numberOfHMetrics
		advanceWidthArray = array.array("H", data[:2 * numberOfHMetrics])
		if sys.byteorder != "big":
			advanceWidthArray.byteswap()
		data = data[2 * numberOfHMetrics:]

		if hasLsbArray:
			# explicit int16 lsb values for the proportional glyphs
			assert len(data) >= 2 * numberOfHMetrics
			lsbArray = array.array("h", data[:2 * numberOfHMetrics])
			if sys.byteorder != "big":
				lsbArray.byteswap()
			data = data[2 * numberOfHMetrics:]
		else:
			# compute (proportional) glyphs' lsb from their xMin
			lsbArray = array.array("h")
			for i, glyphName in enumerate(glyphOrder):
				if i >= numberOfHMetrics:
					break
				glyph = glyfTable[glyphName]
				xMin = getattr(glyph, "xMin", 0)
				lsbArray.append(xMin)

		numberOfSideBearings = numGlyphs - numberOfHMetrics
		if hasLeftSideBearingArray:
			# explicit int16 lsb values for the trailing (monospaced) glyphs
			assert len(data) >= 2 * numberOfSideBearings
			leftSideBearingArray = array.array("h", data[:2 * numberOfSideBearings])
			if sys.byteorder != "big":
				leftSideBearingArray.byteswap()
			data = data[2 * numberOfSideBearings:]
		else:
			# compute (monospaced) glyphs' leftSideBearing from their xMin
			leftSideBearingArray = array.array("h")
			for i, glyphName in enumerate(glyphOrder):
				if i < numberOfHMetrics:
					continue
				glyph = glyfTable[glyphName]
				xMin = getattr(glyph, "xMin", 0)
				leftSideBearingArray.append(xMin)

		if data:
			raise TTLibError("too much '%s' table data" % self.tableTag)
		self.metrics = {}
		for i in range(numberOfHMetrics):
			glyphName = glyphOrder[i]
			advanceWidth, lsb = advanceWidthArray[i], lsbArray[i]
			self.metrics[glyphName] = (advanceWidth, lsb)
		# trailing glyphs share the last explicit advance width
		# NOTE(review): assumes numberOfHMetrics > 0 -- confirm with callers
		lastAdvance = advanceWidthArray[-1]
		for i in range(numberOfSideBearings):
			glyphName = glyphOrder[i + numberOfHMetrics]
			self.metrics[glyphName] = (lastAdvance, leftSideBearingArray[i])

	def transform(self, ttFont):
		# Return the transformed hmtx data, or None when the transform does
		# not apply (both lsb arrays would need to be stored explicitly).
		glyphOrder = ttFont.getGlyphOrder()
		glyf = ttFont["glyf"]
		hhea = ttFont["hhea"]
		numberOfHMetrics = hhea.numberOfHMetrics

		# check if any of the proportional glyphs has left sidebearings that
		# differ from their xMin bounding box values.
		hasLsbArray = False
		for i in range(numberOfHMetrics):
			glyphName = glyphOrder[i]
			lsb = self.metrics[glyphName][1]
			if lsb != getattr(glyf[glyphName], "xMin", 0):
				hasLsbArray = True
				break

		# do the same for the monospaced glyphs (if any) at the end of hmtx table
		hasLeftSideBearingArray = False
		for i in range(numberOfHMetrics, len(glyphOrder)):
			glyphName = glyphOrder[i]
			lsb = self.metrics[glyphName][1]
			if lsb != getattr(glyf[glyphName], "xMin", 0):
				hasLeftSideBearingArray = True
				break

		# if we need to encode both sidebearings arrays, then no transformation is
		# applicable, and we must use the untransformed hmtx data
		if hasLsbArray and hasLeftSideBearingArray:
			return

		# set bit 0 and 1 when the respective arrays are _not_ present
		flags = 0
		if not hasLsbArray:
			flags |= 1 << 0
		if not hasLeftSideBearingArray:
			flags |= 1 << 1

		data = struct.pack(">B", flags)

		advanceWidthArray = array.array(
			"H",
			[
				self.metrics[glyphName][0]
				for i, glyphName in enumerate(glyphOrder)
				if i < numberOfHMetrics
			]
		)
		if sys.byteorder != "big":
			advanceWidthArray.byteswap()
		data += advanceWidthArray.tobytes()

		if hasLsbArray:
			lsbArray = array.array(
				"h",
				[
					self.metrics[glyphName][1]
					for i, glyphName in enumerate(glyphOrder)
					if i < numberOfHMetrics
				]
			)
			if sys.byteorder != "big":
				lsbArray.byteswap()
			data += lsbArray.tobytes()

		if hasLeftSideBearingArray:
			leftSideBearingArray = array.array(
				"h",
				[
					self.metrics[glyphOrder[i]][1]
					for i in range(numberOfHMetrics, len(glyphOrder))
				]
			)
			if sys.byteorder != "big":
				leftSideBearingArray.byteswap()
			data += leftSideBearingArray.tobytes()

		return data
class WOFF2FlavorData(WOFFFlavorData):

	Flavor = 'woff2'

	def __init__(self, reader=None, data=None, transformedTables=None):
		"""Data class that holds the WOFF2 header major/minor version, any
		metadata or private data (as bytes strings), and the set of
		table tags that have transformations applied (if reader is not None),
		or will have once the WOFF2 font is compiled.

		Args:
			reader: an SFNTReader (or subclass) object to read flavor data from.
			data: another WOFFFlavorData object to initialise data from.
			transformedTables: set of strings containing table tags to be transformed.

		Raises:
			ImportError if the brotli module is not installed.

		NOTE: The 'reader' argument, on the one hand, and the 'data' and
		'transformedTables' arguments, on the other hand, are mutually exclusive.
		"""
		if not haveBrotli:
			raise ImportError("No module named brotli")

		if reader is not None:
			if data is not None:
				raise TypeError(
					"'reader' and 'data' arguments are mutually exclusive"
				)
			if transformedTables is not None:
				raise TypeError(
					"'reader' and 'transformedTables' arguments are mutually exclusive"
				)

		# 'glyf' and 'loca' transforms are coupled: one without the other is invalid
		if transformedTables is not None and (
			"glyf" in transformedTables and "loca" not in transformedTables
			or "loca" in transformedTables and "glyf" not in transformedTables
		):
			raise ValueError(
				"'glyf' and 'loca' must be transformed (or not) together"
			)

		super(WOFF2FlavorData, self).__init__(reader=reader)
		if reader:
			# collect the tags whose directory entries are flagged as transformed
			transformedTables = [
				tag
				for tag, entry in reader.tables.items()
				if entry.transformed
			]
		elif data:
			self.majorVersion = data.majorVersion
			# BUGFIX: this previously assigned data.minorVersion to
			# self.majorVersion, clobbering the major version and leaving
			# minorVersion unset.
			self.minorVersion = data.minorVersion
			self.metaData = data.metaData
			self.privData = data.privData
			if transformedTables is None and hasattr(data, "transformedTables"):
				transformedTables = data.transformedTables

		if transformedTables is None:
			# default: transform only 'glyf' and 'loca'
			transformedTables = woff2TransformedTableTags

		self.transformedTables = set(transformedTables)

	def _decompress(self, rawData):
		# WOFF2 payloads are Brotli-compressed
		return brotli.decompress(rawData)
def unpackBase128(data):
	r""" Read one to five bytes from UIntBase128-encoded input string, and return
	a tuple containing the decoded integer plus any leftover data.

	>>> unpackBase128(b'\x3f\x00\x00') == (63, b"\x00\x00")
	True
	>>> unpackBase128(b'\x8f\xff\xff\xff\x7f')[0] == 4294967295
	True
	>>> unpackBase128(b'\x80\x80\x3f')  # doctest: +IGNORE_EXCEPTION_DETAIL
	Traceback (most recent call last):
	  File "<stdin>", line 1, in ?
	TTLibError: UIntBase128 value must not start with leading zeros
	>>> unpackBase128(b'\x8f\xff\xff\xff\xff\x7f')[0]  # doctest: +IGNORE_EXCEPTION_DETAIL
	Traceback (most recent call last):
	  File "<stdin>", line 1, in ?
	TTLibError: UIntBase128-encoded sequence is longer than 5 bytes
	>>> unpackBase128(b'\x90\x80\x80\x80\x00')[0]  # doctest: +IGNORE_EXCEPTION_DETAIL
	Traceback (most recent call last):
	  File "<stdin>", line 1, in ?
	TTLibError: UIntBase128 value exceeds 2**32-1
	"""
	if not data:
		raise TTLibError('not enough data to unpack UIntBase128')

	# font must be rejected if UIntBase128 value starts with 0x80
	if byteord(data[0]) == 0x80:
		raise TTLibError('UIntBase128 value must not start with leading zeros')

	value = 0
	for _ in range(woff2Base128MaxSize):
		if not data:
			raise TTLibError('not enough data to unpack UIntBase128')
		code = byteord(data[0])
		data = data[1:]
		# if any of the top seven bits are set then we're about to overflow
		if value & 0xFE000000:
			raise TTLibError('UIntBase128 value exceeds 2**32-1')
		# accumulate 7 payload bits per byte, most significant group first
		value = (value << 7) | (code & 0x7f)
		# a clear continuation bit terminates the sequence
		if not (code & 0x80):
			return value, data
	# make sure not to exceed the size bound
	raise TTLibError('UIntBase128-encoded sequence is longer than 5 bytes')
def base128Size(n):
	""" Return the length in bytes of a UIntBase128-encoded sequence with value n.

	>>> base128Size(0)
	1
	>>> base128Size(24567)
	3
	>>> base128Size(2**32-1)
	5
	"""
	assert n >= 0
	# one byte per 7 bits of payload; zero still takes one byte
	return max(1, -(-n.bit_length() // 7))
def packBase128(n):
	r""" Encode unsigned integer in range 0 to 2**32-1 (inclusive) to a string of
	bytes using UIntBase128 variable-length encoding. Produce the shortest possible
	encoding.

	>>> packBase128(63) == b"\x3f"
	True
	>>> packBase128(2**32-1) == b'\x8f\xff\xff\xff\x7f'
	True
	"""
	if n < 0 or n >= 2**32:
		raise TTLibError(
			"UIntBase128 format requires 0 <= integer <= 2**32-1")
	# split into 7-bit groups, least significant last
	groups = bytearray()
	while True:
		groups.insert(0, n & 0x7f)
		n >>= 7
		if not n:
			break
	# every byte except the last carries the continuation bit
	for i in range(len(groups) - 1):
		groups[i] |= 0x80
	return bytes(groups)
def unpack255UShort(data):
	""" Read one to three bytes from 255UInt16-encoded input string, and return a
	tuple containing the decoded integer plus any leftover data.

	>>> unpack255UShort(bytechr(252))[0]
	252

	Note that some numbers (e.g. 506) can have multiple encodings:
	>>> unpack255UShort(struct.pack("BB", 254, 0))[0]
	506
	>>> unpack255UShort(struct.pack("BB", 255, 253))[0]
	506
	>>> unpack255UShort(struct.pack("BBB", 253, 1, 250))[0]
	506
	"""
	code = byteord(data[:1])
	data = data[1:]
	if code < 253:
		# values below 253 encode themselves in a single byte
		return code, data
	if code == 253:
		# 253 prefixes a full big-endian unsigned short
		if len(data) < 2:
			raise TTLibError('not enough data to unpack 255UInt16')
		value, = struct.unpack(">H", data[:2])
		return value, data[2:]
	# 254 and 255 prefix one extra byte plus a fixed bias (506 or 253)
	if len(data) == 0:
		raise TTLibError('not enough data to unpack 255UInt16')
	bias = 506 if code == 254 else 253
	return byteord(data[:1]) + bias, data[1:]
def pack255UShort(value):
	r""" Encode unsigned integer in range 0 to 65535 (inclusive) to a bytestring
	using 255UInt16 variable-length encoding.

	>>> pack255UShort(252) == b'\xfc'
	True
	>>> pack255UShort(506) == b'\xfe\x00'
	True
	>>> pack255UShort(762) == b'\xfd\x02\xfa'
	True
	"""
	if not (0 <= value <= 0xFFFF):
		raise TTLibError(
			"255UInt16 format requires 0 <= integer <= 65535")
	if value < 253:
		# small values encode as a single byte
		return bytes((value,))
	if value < 506:
		# 255 prefix: one byte biased by 253
		return bytes((255, value - 253))
	if value < 762:
		# 254 prefix: one byte biased by 506
		return bytes((254, value - 506))
	# 253 prefix: full big-endian unsigned short
	return bytes((253, value >> 8, value & 0xff))
def compress(input_file, output_file, transform_tables=None):
	"""Compress OpenType font to WOFF2.

	Args:
		input_file: a file path, file or file-like object (open in binary mode)
			containing an OpenType font (either CFF- or TrueType-flavored).
		output_file: a file path, file or file-like object where to save the
			compressed WOFF2 font.
		transform_tables: Optional[Iterable[str]]: a set of table tags for which
			to enable preprocessing transformations. By default, only 'glyf'
			and 'loca' tables are transformed. An empty set means disable all
			transformations.
	"""
	log.info("Processing %s => %s" % (input_file, output_file))

	# keep the font byte-exact: no bbox or timestamp recalculation
	ttFont = TTFont(input_file, recalcBBoxes=False, recalcTimestamp=False)
	ttFont.flavor = "woff2"

	if transform_tables is not None:
		ttFont.flavorData = WOFF2FlavorData(
			data=ttFont.flavorData, transformedTables=transform_tables
		)

	ttFont.save(output_file, reorderTables=False)
def decompress(input_file, output_file):
	"""Decompress WOFF2 font to OpenType font.

	Args:
		input_file: a file path, file or file-like object (open in binary mode)
			containing a compressed WOFF2 font.
		output_file: a file path, file or file-like object where to save the
			decompressed OpenType font.
	"""
	log.info("Processing %s => %s" % (input_file, output_file))

	# keep the font byte-exact: no bbox or timestamp recalculation
	ttFont = TTFont(input_file, recalcBBoxes=False, recalcTimestamp=False)
	# strip the WOFF2 flavor so the font saves as plain SFNT
	ttFont.flavor = None
	ttFont.flavorData = None
	ttFont.save(output_file, reorderTables=True)
def main(args=None):
    """Compress and decompress WOFF2 fonts"""
    import argparse
    from fontTools import configLogger
    from fontTools.ttx import makeOutputFileName

    class _HelpAction(argparse._HelpAction):
        # Custom -h/--help: also print the help text of every sub-command,
        # then exit, instead of only showing the top-level parser's help.

        def __call__(self, parser, namespace, values, option_string=None):
            subparsers_actions = [
                action for action in parser._actions
                if isinstance(action, argparse._SubParsersAction)]
            for subparsers_action in subparsers_actions:
                for choice, subparser in subparsers_action.choices.items():
                    print(subparser.format_help())
            parser.exit()

    class _NoGlyfTransformAction(argparse.Action):
        # Zero-argument flag: remove 'glyf'/'loca' from the transform set.

        def __call__(self, parser, namespace, values, option_string=None):
            namespace.transform_tables.difference_update({"glyf", "loca"})

    class _HmtxTransformAction(argparse.Action):
        # Zero-argument flag: opt in to the optional 'hmtx' transform.

        def __call__(self, parser, namespace, values, option_string=None):
            namespace.transform_tables.add("hmtx")

    parser = argparse.ArgumentParser(
        prog="fonttools ttLib.woff2",
        description=main.__doc__,
        add_help = False  # the custom _HelpAction is registered below instead
    )

    parser.add_argument('-h', '--help', action=_HelpAction,
        help='show this help message and exit')

    parser_group = parser.add_subparsers(title="sub-commands")
    parser_compress = parser_group.add_parser("compress",
        description = "Compress a TTF or OTF font to WOFF2")
    parser_decompress = parser_group.add_parser("decompress",
        description = "Decompress a WOFF2 font to OTF")

    # -v/-q are mutually exclusive and available on both sub-commands.
    for subparser in (parser_compress, parser_decompress):
        group = subparser.add_mutually_exclusive_group(required=False)
        group.add_argument(
            "-v",
            "--verbose",
            action="store_true",
            help="print more messages to console",
        )
        group.add_argument(
            "-q",
            "--quiet",
            action="store_true",
            help="do not print messages to console",
        )

    parser_compress.add_argument(
        "input_file",
        metavar="INPUT",
        help="the input OpenType font (.ttf or .otf)",
    )
    parser_decompress.add_argument(
        "input_file",
        metavar="INPUT",
        help="the input WOFF2 font",
    )

    parser_compress.add_argument(
        "-o",
        "--output-file",
        metavar="OUTPUT",
        help="the output WOFF2 font",
    )
    parser_decompress.add_argument(
        "-o",
        "--output-file",
        metavar="OUTPUT",
        help="the output OpenType font",
    )

    transform_group = parser_compress.add_argument_group()
    transform_group.add_argument(
        "--no-glyf-transform",
        dest="transform_tables",
        nargs=0,
        action=_NoGlyfTransformAction,
        help="Do not transform glyf (and loca) tables",
    )
    transform_group.add_argument(
        "--hmtx-transform",
        dest="transform_tables",
        nargs=0,
        action=_HmtxTransformAction,
        help="Enable optional transformation for 'hmtx' table",
    )

    # 'glyf'/'loca' are transformed by default; the flag actions above mutate
    # this set in place.
    parser_compress.set_defaults(
        subcommand=compress,
        transform_tables={"glyf", "loca"},
    )
    parser_decompress.set_defaults(subcommand=decompress)

    options = vars(parser.parse_args(args))

    subcommand = options.pop("subcommand", None)
    if not subcommand:
        # No sub-command given: show help instead of failing.
        parser.print_help()
        return

    quiet = options.pop("quiet")
    verbose = options.pop("verbose")
    configLogger(
        level=("ERROR" if quiet else "DEBUG" if verbose else "INFO"),
    )

    if not options["output_file"]:
        if subcommand is compress:
            extension = ".woff2"
        elif subcommand is decompress:
            # choose .ttf/.otf file extension depending on sfntVersion
            with open(options["input_file"], "rb") as f:
                f.seek(4)  # skip 'wOF2' signature
                sfntVersion = f.read(4)
            assert len(sfntVersion) == 4, "not enough data"
            extension = ".otf" if sfntVersion == b"OTTO" else ".ttf"
        else:
            raise AssertionError(subcommand)
        options["output_file"] = makeOutputFileName(
            options["input_file"], outputDir=None, extension=extension
        )

    try:
        # Remaining options are exactly the sub-command's keyword arguments.
        subcommand(**options)
    except TTLibError as e:
        parser.error(e)
if __name__ == "__main__":
    # Script entry point: exit with main()'s return code.
    sys.exit(main())
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/woff2.py",
"copies": "5",
"size": "49575",
"license": "apache-2.0",
"hash": -8221573259366511000,
"line_mean": 31.3385518591,
"line_max": 111,
"alpha_frac": 0.6975289965,
"autogenerated": false,
"ratio": 3.0874385003425298,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03714136808236252,
"num_lines": 1533
} |
from fontTools.misc.py23 import Tag, bytesjoin
from .DefaultTable import DefaultTable
import sys
import array
import struct
import logging
log = logging.getLogger(__name__)
class OverflowErrorRecord(object):
    """Plain record locating an offset overflow inside a Lookup tree."""

    def __init__(self, overflowTuple):
        # overflowTuple: (tableType, LookupListIndex, SubTableIndex,
        #                 itemName, itemIndex)
        self.tableType = overflowTuple[0]
        self.LookupListIndex = overflowTuple[1]
        self.SubTableIndex = overflowTuple[2]
        self.itemName = overflowTuple[3]
        self.itemIndex = overflowTuple[4]

    def __repr__(self):
        parts = (
            self.tableType,
            "LookupIndex:", self.LookupListIndex,
            "SubTableIndex:", self.SubTableIndex,
            "ItemName:", self.itemName,
            "ItemIndex:", self.itemIndex,
        )
        return str(parts)
class OTLOffsetOverflowError(Exception):
    """Raised when a subtable offset does not fit its offset field; carries
    the describing OverflowErrorRecord in ``value``."""

    def __init__(self, overflowErrorRecord):
        self.value = overflowErrorRecord

    def __str__(self):
        return repr(self.value)
class BaseTTXConverter(DefaultTable):

    """Generic base class for TTX table converters. It functions as an
    adapter between the TTX (ttLib actually) table model and the model
    we use for OpenType tables, which is necessarily subtly different.
    """

    def decompile(self, data, font):
        """Decompile binary *data* into ``self.table``, an otTables class
        looked up by this table's tag."""
        from . import otTables
        reader = OTTableReader(data, tableTag=self.tableTag)
        tableClass = getattr(otTables, self.tableTag)
        self.table = tableClass()
        self.table.decompile(reader, font)

    def compile(self, font):
        """Create a top-level OTTableWriter for the GPOS/GSUB table.

        Call the compile method for the table; for each 'converter' record
        in the table converter list, call the converter's write method for
        each item in the value:
          - For simple items, the write method adds a string to the
            writer's self.items list.
          - For Struct/Table/Subtable items, it first adds a new writer to
            the writer's self.items, then calls the item's compile method.
            This creates a tree of writers, rooted at the GSUB/GPOS writer,
            with each writer representing a table, and the writer.items
            list containing the child data strings and writers.

        Then call the getAllData method:
          - call _doneWriting, which removes duplicates
          - call _gatherTables, which traverses the tables, adding unique
            occurences to a flat list of tables
          - traverse the flat list of tables, calling getDataLength on each
            to update their position
          - traverse the flat list of tables again, calling getData to get
            the data in the table, now that pos's and offsets are known.

        If a lookup subtable overflows an offset, we have to start all over.
        """
        overflowRecord = None

        while True:
            try:
                writer = OTTableWriter(tableTag=self.tableTag)
                self.table.compile(writer, font)
                return writer.getAllData()

            except OTLOffsetOverflowError as e:
                # Give up when a fix attempt leaves the same overflow in place.
                if overflowRecord == e.value:
                    raise # Oh well...

                overflowRecord = e.value
                log.info("Attempting to fix OTLOffsetOverflowError %s", e)
                lastItem = overflowRecord

                ok = 0
                if overflowRecord.itemName is None:
                    # Overflow on a whole lookup: try splitting/moving lookups.
                    from .otTables import fixLookupOverFlows
                    ok = fixLookupOverFlows(font, overflowRecord)
                else:
                    # Overflow inside a subtable: try splitting the subtable.
                    from .otTables import fixSubTableOverFlows
                    ok = fixSubTableOverFlows(font, overflowRecord)
                if not ok:
                    # Try upgrading lookup to Extension and hope
                    # that cross-lookup sharing not happening would
                    # fix overflow...
                    from .otTables import fixLookupOverFlows
                    ok = fixLookupOverFlows(font, overflowRecord)
                    if not ok:
                        raise

    def toXML(self, writer, font):
        # The parent (TTX machinery) writes our main tag; delegate the body.
        self.table.toXML2(writer, font)

    def fromXML(self, name, attrs, content, font):
        """Feed one parsed XML element into ``self.table``, creating it on
        first use and filling defaults afterwards."""
        from . import otTables
        if not hasattr(self, "table"):
            tableClass = getattr(otTables, self.tableTag)
            self.table = tableClass()
        self.table.fromXML(name, attrs, content, font)
        self.table.populateDefaults()
class OTTableReader(object):

    """Helper class to retrieve data from an OpenType table."""

    __slots__ = ('data', 'offset', 'pos', 'localState', 'tableTag')

    def __init__(self, data, localState=None, offset=0, tableTag=None):
        self.data = data
        self.offset = offset
        self.pos = offset
        self.localState = localState
        self.tableTag = tableTag

    def advance(self, count):
        """Move the read cursor forward by *count* bytes."""
        self.pos += count

    def seek(self, pos):
        """Move the read cursor to absolute position *pos*."""
        self.pos = pos

    def copy(self):
        """Return a new reader over the same data, cursor position included."""
        clone = self.__class__(self.data, self.localState, self.offset, self.tableTag)
        clone.pos = self.pos
        return clone

    def getSubReader(self, offset):
        """Return a reader whose origin is *offset* bytes past this one's."""
        return self.__class__(
            self.data, self.localState, self.offset + offset, self.tableTag)

    def readValue(self, typecode, staticSize):
        """Read one big-endian value described by struct *typecode*."""
        start = self.pos
        end = start + staticSize
        value, = struct.unpack(f">{typecode}", self.data[start:end])
        self.pos = end
        return value

    def readUShort(self):
        return self.readValue("H", staticSize=2)

    def readArray(self, typecode, staticSize, count):
        """Read *count* big-endian values into an array.array of *typecode*."""
        start = self.pos
        end = start + count * staticSize
        values = array.array(typecode, self.data[start:end])
        if sys.byteorder != "big":
            values.byteswap()
        self.pos = end
        return values

    def readUShortArray(self, count):
        return self.readArray("H", staticSize=2, count=count)

    def readInt8(self):
        return self.readValue("b", staticSize=1)

    def readShort(self):
        return self.readValue("h", staticSize=2)

    def readLong(self):
        return self.readValue("l", staticSize=4)

    def readUInt8(self):
        return self.readValue("B", staticSize=1)

    def readUInt24(self):
        start = self.pos
        end = start + 3
        # Prepend a zero byte so the 3-byte value unpacks as a 4-byte uint.
        value, = struct.unpack(">L", b'\0' + self.data[start:end])
        self.pos = end
        return value

    def readULong(self):
        return self.readValue("L", staticSize=4)

    def readTag(self):
        """Read a 4-byte tag."""
        start = self.pos
        end = start + 4
        tag = Tag(self.data[start:end])
        assert len(tag) == 4, tag
        self.pos = end
        return tag

    def readData(self, count):
        """Read *count* raw bytes."""
        start = self.pos
        end = start + count
        chunk = self.data[start:end]
        self.pos = end
        return chunk

    def __setitem__(self, name, value):
        # Copy-on-write so sibling readers sharing localState are unaffected.
        newState = dict(self.localState) if self.localState else {}
        newState[name] = value
        self.localState = newState

    def __getitem__(self, name):
        return self.localState and self.localState[name]

    def __contains__(self, name):
        return self.localState and name in self.localState
class OTTableWriter(object):

    """Helper class to gather and assemble data for OpenType tables.

    A writer holds a flat ``items`` list whose entries are either raw data
    strings, CountReference placeholders, or nested OTTableWriter objects
    (subtables referenced by offset).
    """

    def __init__(self, localState=None, tableTag=None, offsetSize=2):
        self.items = []
        self.pos = None  # absolute position, assigned during getAllData()
        self.localState = localState
        self.tableTag = tableTag
        self.offsetSize = offsetSize
        self.parent = None

    # DEPRECATED: 'longOffset' is kept as a property for backward compat with old code.
    # You should use 'offsetSize' instead (2, 3 or 4 bytes).
    @property
    def longOffset(self):
        return self.offsetSize == 4

    @longOffset.setter
    def longOffset(self, value):
        self.offsetSize = 4 if value else 2

    def __setitem__(self, name, value):
        # Copy-on-write: never mutate a localState dict shared with others.
        state = self.localState.copy() if self.localState else dict()
        state[name] = value
        self.localState = state

    def __getitem__(self, name):
        return self.localState[name]

    def __delitem__(self, name):
        del self.localState[name]

    # assembler interface

    def getDataLength(self):
        """Return the length of this table in bytes, without subtables."""
        l = 0
        for item in self.items:
            if hasattr(item, "getCountData"):
                # CountReference placeholder: occupies its declared size.
                l += item.size
            elif hasattr(item, "getData"):
                # Nested writer: only its offset field counts here.
                l += item.offsetSize
            else:
                # Plain data string.
                l = l + len(item)
        return l

    def getData(self):
        """Assemble the data for this writer/table, without subtables.

        Requires that all writers already have their absolute ``pos`` set
        (done by getAllData); offsets are written relative to self.pos.
        """
        items = list(self.items)  # make a shallow copy
        pos = self.pos
        numItems = len(items)
        for i in range(numItems):
            item = items[i]

            if hasattr(item, "getData"):
                # Replace the nested writer by its packed relative offset.
                if item.offsetSize == 4:
                    items[i] = packULong(item.pos - pos)
                elif item.offsetSize == 2:
                    try:
                        items[i] = packUShort(item.pos - pos)
                    except struct.error:
                        # provide data to fix overflow problem.
                        overflowErrorRecord = self.getOverflowErrorRecord(item)

                        raise OTLOffsetOverflowError(overflowErrorRecord)
                elif item.offsetSize == 3:
                    items[i] = packUInt24(item.pos - pos)
                else:
                    raise ValueError(item.offsetSize)

        return bytesjoin(items)

    def __hash__(self):
        # only works after self._doneWriting() has been called
        return hash(self.items)

    def __ne__(self, other):
        result = self.__eq__(other)
        return result if result is NotImplemented else not result

    def __eq__(self, other):
        if type(self) != type(other):
            return NotImplemented
        return self.offsetSize == other.offsetSize and self.items == other.items

    def _doneWriting(self, internedTables):
        # Convert CountData references to data string items
        # collapse duplicate table references to a unique entry
        # "tables" are OTTableWriter objects.

        # For Extension Lookup types, we can
        # eliminate duplicates only within the tree under the Extension Lookup,
        # as offsets may exceed 64K even between Extension LookupTable subtables.
        isExtension = hasattr(self, "Extension")

        # Certain versions of Uniscribe reject the font if the GSUB/GPOS top-level
        # arrays (ScriptList, FeatureList, LookupList) point to the same, possibly
        # empty, array.  So, we don't share those.
        # See: https://github.com/fonttools/fonttools/issues/518
        dontShare = hasattr(self, 'DontShare')

        if isExtension:
            internedTables = {}

        items = self.items
        for i in range(len(items)):
            item = items[i]
            if hasattr(item, "getCountData"):
                items[i] = item.getCountData()
            elif hasattr(item, "getData"):
                item._doneWriting(internedTables)
                if not dontShare:
                    items[i] = item = internedTables.setdefault(item, item)
        # Freeze to a tuple so this writer becomes hashable for interning.
        self.items = tuple(items)

    def _gatherTables(self, tables, extTables, done):
        # Convert table references in self.items tree to a flat
        # list of tables in depth-first traversal order.
        # "tables" are OTTableWriter objects.
        # We do the traversal in reverse order at each level, in order to
        # resolve duplicate references to be the last reference in the list of tables.
        # For extension lookups, duplicate references can be merged only within the
        # writer tree under the extension lookup.

        done[id(self)] = True

        numItems = len(self.items)
        iRange = list(range(numItems))
        iRange.reverse()

        isExtension = hasattr(self, "Extension")

        selfTables = tables

        if isExtension:
            assert extTables is not None, "Program or XML editing error. Extension subtables cannot contain extensions subtables"
            # Extension payloads are gathered into the separate extTables
            # list, with a fresh 'done' scope for duplicate merging.
            tables, extTables, done = extTables, None, {}

        # add Coverage table if it is sorted last.
        sortCoverageLast = 0
        if hasattr(self, "sortCoverageLast"):
            # Find coverage table
            for i in range(numItems):
                item = self.items[i]
                if hasattr(item, "name") and (item.name == "Coverage"):
                    sortCoverageLast = 1
                    break
            if id(item) not in done:
                item._gatherTables(tables, extTables, done)
            else:
                # We're a new parent of item
                pass

        for i in iRange:
            item = self.items[i]
            if not hasattr(item, "getData"):
                continue

            if sortCoverageLast and (i==1) and item.name == 'Coverage':
                # we've already 'gathered' it above
                continue

            if id(item) not in done:
                item._gatherTables(tables, extTables, done)
            else:
                # Item is already written out by other parent
                pass

        selfTables.append(self)

    def getAllData(self):
        """Assemble all data, including all subtables."""
        internedTables = {}
        self._doneWriting(internedTables)
        tables = []
        extTables = []
        done = {}
        self._gatherTables(tables, extTables, done)
        tables.reverse()
        extTables.reverse()
        # Gather all data in two passes: the absolute positions of all
        # subtable are needed before the actual data can be assembled.
        pos = 0
        for table in tables:
            table.pos = pos
            pos = pos + table.getDataLength()

        for table in extTables:
            table.pos = pos
            pos = pos + table.getDataLength()

        data = []
        for table in tables:
            tableData = table.getData()
            data.append(tableData)

        for table in extTables:
            tableData = table.getData()
            data.append(tableData)

        return bytesjoin(data)

    # interface for gathering data, as used by table.compile()

    def getSubWriter(self, offsetSize=2):
        subwriter = self.__class__(self.localState, self.tableTag, offsetSize=offsetSize)
        subwriter.parent = self  # because some subtables have idential values, we discard
                                 # the duplicates under the getAllData method. Hence some
                                 # subtable writers can have more than one parent writer.
                                 # But we just care about first one right now.
        return subwriter

    def writeValue(self, typecode, value):
        self.items.append(struct.pack(f">{typecode}", value))

    def writeUShort(self, value):
        assert 0 <= value < 0x10000, value
        self.items.append(struct.pack(">H", value))

    def writeShort(self, value):
        assert -32768 <= value < 32768, value
        self.items.append(struct.pack(">h", value))

    def writeUInt8(self, value):
        assert 0 <= value < 256, value
        self.items.append(struct.pack(">B", value))

    def writeInt8(self, value):
        assert -128 <= value < 128, value
        self.items.append(struct.pack(">b", value))

    def writeUInt24(self, value):
        assert 0 <= value < 0x1000000, value
        # Pack as uint32, then drop the always-zero top byte.
        b = struct.pack(">L", value)
        self.items.append(b[1:])

    def writeLong(self, value):
        self.items.append(struct.pack(">l", value))

    def writeULong(self, value):
        self.items.append(struct.pack(">L", value))

    def writeTag(self, tag):
        tag = Tag(tag).tobytes()
        assert len(tag) == 4, tag
        self.items.append(tag)

    def writeSubTable(self, subWriter):
        self.items.append(subWriter)

    def writeCountReference(self, table, name, size=2, value=None):
        """Append a CountReference placeholder; its value may be filled in
        later (see CountReference) before the data is assembled."""
        ref = CountReference(table, name, size=size, value=value)
        self.items.append(ref)
        return ref

    def writeStruct(self, format, values):
        data = struct.pack(*(format,) + values)
        self.items.append(data)

    def writeData(self, data):
        self.items.append(data)

    def getOverflowErrorRecord(self, item):
        """Build an OverflowErrorRecord locating *item* within the Lookup
        tree, climbing parents as needed to find the enclosing subtable."""
        LookupListIndex = SubTableIndex = itemName = itemIndex = None
        if self.name == 'LookupList':
            LookupListIndex = item.repeatIndex
        elif self.name == 'Lookup':
            LookupListIndex = self.repeatIndex
            SubTableIndex = item.repeatIndex
        else:
            itemName = getattr(item, 'name', '<none>')
            if hasattr(item, 'repeatIndex'):
                itemIndex = item.repeatIndex
            if self.name == 'SubTable':
                LookupListIndex = self.parent.repeatIndex
                SubTableIndex = self.repeatIndex
            elif self.name == 'ExtSubTable':
                LookupListIndex = self.parent.parent.repeatIndex
                SubTableIndex = self.parent.repeatIndex
            else:  # who knows how far below the SubTable level we are! Climb back up to the nearest subtable.
                itemName = ".".join([self.name, itemName])
                p1 = self.parent
                while p1 and p1.name not in ['ExtSubTable', 'SubTable']:
                    itemName = ".".join([p1.name, itemName])
                    p1 = p1.parent
                if p1:
                    if p1.name == 'ExtSubTable':
                        LookupListIndex = p1.parent.parent.repeatIndex
                        SubTableIndex = p1.parent.repeatIndex
                    else:
                        LookupListIndex = p1.parent.repeatIndex
                        SubTableIndex = p1.repeatIndex
        return OverflowErrorRecord( (self.tableTag, LookupListIndex, SubTableIndex, itemName, itemIndex) )
class CountReference(object):
    """A reference to a Count value, not a count of references.

    Acts as a placeholder in a writer's item list: the count may be set
    after the reference has been written, and is packed at assembly time.
    """

    def __init__(self, table, name, size=None, value=None):
        self.table = table
        self.name = name
        self.size = size
        if value is not None:
            self.setValue(value)

    def setValue(self, value):
        current = self.table[self.name]
        if current is None:
            self.table[self.name] = value
        else:
            # A count may be set more than once; the values must agree.
            assert current == value, (self.name, current, value)

    def getValue(self):
        return self.table[self.name]

    def getCountData(self):
        """Pack the count as big-endian data of the declared size."""
        count = self.table[self.name]
        if count is None:
            count = 0
        packer = {1: packUInt8, 2: packUShort, 4: packULong}[self.size]
        return packer(count)
_UINT8_PACKER = struct.Struct(">B")


def packUInt8(value):
    """Pack *value* as a big-endian unsigned 8-bit integer.

    Raises struct.error when the value is out of range.
    """
    return _UINT8_PACKER.pack(value)
_UINT16_PACKER = struct.Struct(">H")


def packUShort(value):
    """Pack *value* as a big-endian uint16.

    Raises struct.error when out of range; callers rely on struct.error
    to detect offset overflows.
    """
    return _UINT16_PACKER.pack(value)
_UINT32_PACKER = struct.Struct(">L")


def packULong(value):
    """Pack *value* as a big-endian uint32."""
    assert 0 <= value < 0x100000000, value
    return _UINT32_PACKER.pack(value)
def packUInt24(value):
    """Pack *value* as a big-endian 3-byte unsigned integer."""
    assert 0 <= value < 0x1000000, value
    # Pack as uint32, then keep only the low three bytes.
    return struct.pack(">L", value)[-3:]
class BaseTable(object):

    """Generic base class for all OpenType (sub)tables.

    Serialization in both directions is driven by the class's 'converters'
    list (set up elsewhere); instances support lazy decompilation via a
    stashed 'reader'/'font' pair.
    """

    def __getattr__(self, attr):
        # Lazy decompilation: the first missing-attribute access triggers
        # parsing from the stashed reader, then retries the lookup.
        reader = self.__dict__.get("reader")
        if reader:
            del self.reader
            font = self.font
            del self.font
            self.decompile(reader, font)
            return getattr(self, attr)

        raise AttributeError(attr)

    def ensureDecompiled(self):
        """Force the lazy decompilation (see __getattr__) to happen now."""
        reader = self.__dict__.get("reader")
        if reader:
            del self.reader
            font = self.font
            del self.font
            self.decompile(reader, font)

    @classmethod
    def getRecordSize(cls, reader):
        """Return this table's byte size, or NotImplemented if any
        converter's size cannot be determined from *reader*."""
        totalSize = 0
        for conv in cls.converters:
            size = conv.getRecordSize(reader)
            if size is NotImplemented: return NotImplemented
            countValue = 1
            if conv.repeat:
                if conv.repeat in reader:
                    countValue = reader[conv.repeat] + conv.aux
                else:
                    return NotImplemented
            totalSize += size * countValue
        return totalSize

    def getConverters(self):
        return self.converters

    def getConverterByName(self, name):
        return self.convertersByName[name]

    def populateDefaults(self, propagator=None):
        """Fill in defaults for missing fields (used after fromXML)."""
        for conv in self.getConverters():
            if conv.repeat:
                if not hasattr(self, conv.name):
                    setattr(self, conv.name, [])
                countValue = len(getattr(self, conv.name)) - conv.aux
                try:
                    count_conv = self.getConverterByName(conv.repeat)
                    setattr(self, conv.repeat, countValue)
                except KeyError:
                    # conv.repeat is a propagated count
                    if propagator and conv.repeat in propagator:
                        propagator[conv.repeat].setValue(countValue)
            else:
                if conv.aux and not eval(conv.aux, None, self.__dict__):
                    continue
                if hasattr(self, conv.name):
                    continue # Warn if it should NOT be present?!
                if hasattr(conv, 'writeNullOffset'):
                    setattr(self, conv.name, None) # Warn?
                #elif not conv.isCount:
                #    # Warn?
                #    pass

    def decompile(self, reader, font):
        """Parse binary data from *reader*, converter by converter, into
        instance attributes (or hand the raw dict to a postRead hook)."""
        self.readFormat(reader)
        table = {}
        self.__rawTable = table  # for debugging
        for conv in self.getConverters():
            # Some converters are polymorphic; resolve them from context.
            if conv.name == "SubTable":
                conv = conv.getConverter(reader.tableTag,
                        table["LookupType"])
            if conv.name == "ExtSubTable":
                conv = conv.getConverter(reader.tableTag,
                        table["ExtensionLookupType"])
            if conv.name == "FeatureParams":
                conv = conv.getConverter(reader["FeatureTag"])
            if conv.name == "SubStruct":
                conv = conv.getConverter(reader.tableTag,
                        table["MorphType"])
            try:
                if conv.repeat:
                    if isinstance(conv.repeat, int):
                        countValue = conv.repeat
                    elif conv.repeat in table:
                        countValue = table[conv.repeat]
                    else:
                        # conv.repeat is a propagated count
                        countValue = reader[conv.repeat]
                    countValue += conv.aux
                    table[conv.name] = conv.readArray(reader, font, table, countValue)
                else:
                    if conv.aux and not eval(conv.aux, None, table):
                        continue
                    table[conv.name] = conv.read(reader, font, table)
                    if conv.isPropagated:
                        reader[conv.name] = table[conv.name]
            except Exception as e:
                # Tag the exception with the field name for diagnostics.
                name = conv.name
                e.args = e.args + (name,)
                raise

        if hasattr(self, 'postRead'):
            self.postRead(table, font)
        else:
            self.__dict__.update(table)

        del self.__rawTable  # succeeded, get rid of debugging info

    def compile(self, writer, font):
        """Serialize this table into *writer*, converter by converter."""
        self.ensureDecompiled()
        # TODO Following hack to be removed by rewriting how FormatSwitching tables
        # are handled.
        # https://github.com/fonttools/fonttools/pull/2238#issuecomment-805192631
        if hasattr(self, 'preWrite'):
            deleteFormat = not hasattr(self, 'Format')
            table = self.preWrite(font)
            deleteFormat = deleteFormat and hasattr(self, 'Format')
        else:
            deleteFormat = False
            table = self.__dict__.copy()

        # some count references may have been initialized in a custom preWrite; we set
        # these in the writer's state beforehand (instead of sequentially) so they will
        # be propagated to all nested subtables even if the count appears in the current
        # table only *after* the offset to the subtable that it is counting.
        for conv in self.getConverters():
            if conv.isCount and conv.isPropagated:
                value = table.get(conv.name)
                if isinstance(value, CountReference):
                    writer[conv.name] = value

        if hasattr(self, 'sortCoverageLast'):
            writer.sortCoverageLast = 1

        if hasattr(self, 'DontShare'):
            writer.DontShare = True

        if hasattr(self.__class__, 'LookupType'):
            writer['LookupType'].setValue(self.__class__.LookupType)

        self.writeFormat(writer)
        for conv in self.getConverters():
            value = table.get(conv.name) # TODO Handle defaults instead of defaulting to None!
            if conv.repeat:
                if value is None:
                    value = []
                countValue = len(value) - conv.aux
                if isinstance(conv.repeat, int):
                    assert len(value) == conv.repeat, 'expected %d values, got %d' % (conv.repeat, len(value))
                elif conv.repeat in table:
                    CountReference(table, conv.repeat, value=countValue)
                else:
                    # conv.repeat is a propagated count
                    writer[conv.repeat].setValue(countValue)
                values = value
                for i, value in enumerate(values):
                    try:
                        conv.write(writer, font, table, value, i)
                    except Exception as e:
                        name = value.__class__.__name__ if value is not None else conv.name
                        e.args = e.args + (name+'['+str(i)+']',)
                        raise
            elif conv.isCount:
                # Special-case Count values.
                # Assumption: a Count field will *always* precede
                # the actual array(s).
                # We need a default value, as it may be set later by a nested
                # table. We will later store it here.
                # We add a reference: by the time the data is assembled
                # the Count value will be filled in.
                # We ignore the current count value since it will be recomputed,
                # unless it's a CountReference that was already initialized in a custom preWrite.
                if isinstance(value, CountReference):
                    ref = value
                    ref.size = conv.staticSize
                    writer.writeData(ref)
                    table[conv.name] = ref.getValue()
                else:
                    ref = writer.writeCountReference(table, conv.name, conv.staticSize)
                    table[conv.name] = None
                if conv.isPropagated:
                    writer[conv.name] = ref
            elif conv.isLookupType:
                # We make sure that subtables have the same lookup type,
                # and that the type is the same as the one set on the
                # Lookup object, if any is set.
                if conv.name not in table:
                    table[conv.name] = None
                ref = writer.writeCountReference(table, conv.name, conv.staticSize, table[conv.name])
                writer['LookupType'] = ref
            else:
                if conv.aux and not eval(conv.aux, None, table):
                    continue
                try:
                    conv.write(writer, font, table, value)
                except Exception as e:
                    name = value.__class__.__name__ if value is not None else conv.name
                    e.args = e.args + (name,)
                    raise
                if conv.isPropagated:
                    writer[conv.name] = value

        if deleteFormat:
            del self.Format

    def readFormat(self, reader):
        # Plain tables have no Format field; FormatSwitching subclasses override.
        pass

    def writeFormat(self, writer):
        pass

    def toXML(self, xmlWriter, font, attrs=None, name=None):
        """Write this table as an XML element (tag plus body)."""
        tableName = name if name else self.__class__.__name__
        if attrs is None:
            attrs = []
        if hasattr(self, "Format"):
            attrs = attrs + [("Format", self.Format)]
        xmlWriter.begintag(tableName, attrs)
        xmlWriter.newline()
        self.toXML2(xmlWriter, font)
        xmlWriter.endtag(tableName)
        xmlWriter.newline()

    def toXML2(self, xmlWriter, font):
        # Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB).
        # This is because in TTX our parent writes our main tag, and in otBase.py we
        # do it ourselves. I think I'm getting schizophrenic...
        for conv in self.getConverters():
            if conv.repeat:
                value = getattr(self, conv.name, [])
                for i in range(len(value)):
                    item = value[i]
                    conv.xmlWrite(xmlWriter, font, item, conv.name,
                            [("index", i)])
            else:
                if conv.aux and not eval(conv.aux, None, vars(self)):
                    continue
                value = getattr(self, conv.name, None) # TODO Handle defaults instead of defaulting to None!
                conv.xmlWrite(xmlWriter, font, value, conv.name, [])

    def fromXML(self, name, attrs, content, font):
        """Set one field from a parsed XML element; repeated fields are
        appended to a list created on first use."""
        try:
            conv = self.getConverterByName(name)
        except KeyError:
            raise    # XXX on KeyError, raise nice error
        value = conv.xmlRead(attrs, content, font)
        if conv.repeat:
            seq = getattr(self, conv.name, None)
            if seq is None:
                seq = []
                setattr(self, conv.name, seq)
            seq.append(value)
        else:
            setattr(self, conv.name, value)

    def __ne__(self, other):
        result = self.__eq__(other)
        return result if result is NotImplemented else not result

    def __eq__(self, other):
        if type(self) != type(other):
            return NotImplemented

        # Compare fully-parsed state on both sides.
        self.ensureDecompiled()
        other.ensureDecompiled()

        return self.__dict__ == other.__dict__
class FormatSwitchingBaseTable(BaseTable):

    """Minor specialization of BaseTable, for tables that have multiple
    formats, eg. CoverageFormat1 vs. CoverageFormat2."""

    @classmethod
    def getRecordSize(cls, reader):
        # Size depends on the runtime Format value, so it is never static.
        return NotImplemented

    def getConverters(self):
        # The converter list is selected by the current Format value.
        return self.converters.get(self.Format, [])

    def getConverterByName(self, name):
        return self.convertersByName[self.Format][name]

    def readFormat(self, reader):
        # Format is a uint16 that precedes the table body.
        self.Format = reader.readUShort()

    def writeFormat(self, writer):
        writer.writeUShort(self.Format)

    def toXML(self, xmlWriter, font, attrs=None, name=None):
        BaseTable.toXML(self, xmlWriter, font, attrs, name)
class UInt8FormatSwitchingBaseTable(FormatSwitchingBaseTable):
    """Variant of FormatSwitchingBaseTable whose Format field is a uint8."""

    def readFormat(self, reader):
        self.Format = reader.readUInt8()

    def writeFormat(self, writer):
        writer.writeUInt8(self.Format)
# Maps the byte width of the Format field to the matching base class.
formatSwitchingBaseTables = {
    "uint16": FormatSwitchingBaseTable,
    "uint8": UInt8FormatSwitchingBaseTable,
}


def getFormatSwitchingBaseTableClass(formatType):
    """Return the format-switching base class for *formatType*
    ('uint16' or 'uint8'); raise TypeError for anything else."""
    try:
        baseClass = formatSwitchingBaseTables[formatType]
    except KeyError:
        raise TypeError(f"Unsupported format type: {formatType!r}")
    return baseClass
#
# Support for ValueRecords
#
# This data type is so different from all other OpenType data types that
# it requires quite a bit of code for itself. It even has special support
# in OTTableReader and OTTableWriter...
#
valueRecordFormat = [
# Mask Name isDevice signed
(0x0001, "XPlacement", 0, 1),
(0x0002, "YPlacement", 0, 1),
(0x0004, "XAdvance", 0, 1),
(0x0008, "YAdvance", 0, 1),
(0x0010, "XPlaDevice", 1, 0),
(0x0020, "YPlaDevice", 1, 0),
(0x0040, "XAdvDevice", 1, 0),
(0x0080, "YAdvDevice", 1, 0),
# reserved:
(0x0100, "Reserved1", 0, 0),
(0x0200, "Reserved2", 0, 0),
(0x0400, "Reserved3", 0, 0),
(0x0800, "Reserved4", 0, 0),
(0x1000, "Reserved5", 0, 0),
(0x2000, "Reserved6", 0, 0),
(0x4000, "Reserved7", 0, 0),
(0x8000, "Reserved8", 0, 0),
]
def _buildDict():
d = {}
for mask, name, isDevice, signed in valueRecordFormat:
d[name] = mask, isDevice, signed
return d
valueRecordFormatDict = _buildDict()
class ValueRecordFactory(object):

    """Reads and writes ValueRecords, driven by a value-format bitmask."""

    def __init__(self, valueFormat):
        # Keep only the fields whose bit is set in valueFormat, in spec order.
        self.format = [
            (name, isDevice, signed)
            for mask, name, isDevice, signed in valueRecordFormat
            if valueFormat & mask
        ]

    def __len__(self):
        return len(self.format)

    def readValueRecord(self, reader, font):
        """Read one ValueRecord from *reader*; None for an empty format."""
        if not self.format:
            return None
        valueRecord = ValueRecord()
        for name, isDevice, signed in self.format:
            value = reader.readShort() if signed else reader.readUShort()
            if isDevice:
                if value:
                    # The value is an offset to a device table; parse it.
                    from . import otTables
                    subReader = reader.getSubReader(value)
                    value = getattr(otTables, name)()
                    value.decompile(subReader, font)
                else:
                    # Zero offset means "no device table".
                    value = None
            setattr(valueRecord, name, value)
        return valueRecord

    def writeValueRecord(self, writer, font, valueRecord):
        """Write *valueRecord* to *writer*, field by field."""
        for name, isDevice, signed in self.format:
            value = getattr(valueRecord, name, 0)
            if isDevice:
                if value:
                    subWriter = writer.getSubWriter()
                    writer.writeSubTable(subWriter)
                    value.compile(subWriter, font)
                else:
                    writer.writeUShort(0)
            elif signed:
                writer.writeShort(value)
            else:
                writer.writeUShort(value)
class ValueRecord(object):

    # see ValueRecordFactory
    # Fields are set dynamically; only those selected by the value format
    # exist as attributes.

    def __init__(self, valueFormat=None, src=None):
        # With a valueFormat: create only the selected fields (device fields
        # default to None, scalars to 0), optionally copying matching values
        # from src.  Without one: become a plain copy of src (if given).
        if valueFormat is not None:
            for mask, name, isDevice, signed in valueRecordFormat:
                if valueFormat & mask:
                    setattr(self, name, None if isDevice else 0)
            if src is not None:
                for key,val in src.__dict__.items():
                    if not hasattr(self, key):
                        continue
                    setattr(self, key, val)
        elif src is not None:
            self.__dict__ = src.__dict__.copy()

    def getFormat(self):
        """Return the value-format bitmask covering all fields present."""
        format = 0
        for name in self.__dict__.keys():
            format = format | valueRecordFormatDict[name][0]
        return format

    def getEffectiveFormat(self):
        """Like getFormat, but only counts fields with truthy values."""
        format = 0
        for name,value in self.__dict__.items():
            if value:
                format = format | valueRecordFormatDict[name][0]
        return format

    def toXML(self, xmlWriter, font, valueName, attrs=None):
        """Write as one XML element: scalar fields become attributes,
        device tables become child elements."""
        if attrs is None:
            simpleItems = []
        else:
            simpleItems = list(attrs)
        for mask, name, isDevice, format in valueRecordFormat[:4]:  # "simple" values
            if hasattr(self, name):
                simpleItems.append((name, getattr(self, name)))
        deviceItems = []
        for mask, name, isDevice, format in valueRecordFormat[4:8]:  # device records
            if hasattr(self, name):
                device = getattr(self, name)
                if device is not None:
                    deviceItems.append((name, device))
        if deviceItems:
            xmlWriter.begintag(valueName, simpleItems)
            xmlWriter.newline()
            for name, deviceRecord in deviceItems:
                if deviceRecord is not None:
                    deviceRecord.toXML(xmlWriter, font, name=name)
            xmlWriter.endtag(valueName)
            xmlWriter.newline()
        else:
            xmlWriter.simpletag(valueName, simpleItems)
            xmlWriter.newline()

    def fromXML(self, name, attrs, content, font):
        """Populate fields from one parsed XML element (inverse of toXML)."""
        from . import otTables
        # Scalar fields arrive as XML attributes...
        for k, v in attrs.items():
            setattr(self, k, int(v))
        # ...device tables as child elements.  NOTE: 'name' is deliberately
        # rebound to each child element's name below.
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            value = getattr(otTables, name)()
            for elem2 in content:
                if not isinstance(elem2, tuple):
                    continue
                name2, attrs2, content2 = elem2
                value.fromXML(name2, attrs2, content2, font)
            setattr(self, name, value)

    def __ne__(self, other):
        result = self.__eq__(other)
        return result if result is NotImplemented else not result

    def __eq__(self, other):
        if type(self) != type(other):
            return NotImplemented
        return self.__dict__ == other.__dict__
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/otBase.py",
"copies": "2",
"size": "30335",
"license": "apache-2.0",
"hash": 9176469849200878000,
"line_mean": 28.4800777454,
"line_max": 164,
"alpha_frac": 0.6912147684,
"autogenerated": false,
"ratio": 3.2271276595744682,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9757812891782077,
"avg_score": 0.032105907238478164,
"num_lines": 1029
} |
from fontTools.misc.py23 import Tag, bytesjoin
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import (
fixedToFloat as fi2fl,
floatToFixed as fl2fi,
floatToFixedToStr as fl2str,
strToFixedToFloat as str2fl,
)
from fontTools.misc.textTools import safeEval
from fontTools.ttLib import TTLibError
from . import DefaultTable
import struct
# Apple's documentation of 'fvar':
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6fvar.html
# sstruct format for the 'fvar' table header (all values big endian).
FVAR_HEADER_FORMAT = """
> # big endian
version: L
offsetToData: H
countSizePairs: H
axisCount: H
axisSize: H
instanceCount: H
instanceSize: H
"""
# sstruct format for one variation-axis record: a 4-byte tag plus the
# min/default/max range as 16.16 fixed-point values.
FVAR_AXIS_FORMAT = """
> # big endian
axisTag: 4s
minValue: 16.16F
defaultValue: 16.16F
maxValue: 16.16F
flags: H
axisNameID: H
"""
# sstruct format for the fixed-size prefix of a named-instance record; the
# per-axis coordinates (and an optional postscriptNameID) follow it and are
# packed separately with struct.
FVAR_INSTANCE_FORMAT = """
> # big endian
subfamilyNameID: H
flags: H
"""
class table__f_v_a_r(DefaultTable.DefaultTable):
    """Font variations ('fvar') table.

    Holds the font's variation axes (``self.axes``) and its named
    instances (``self.instances``).
    """

    dependencies = ["name"]

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.axes = []
        self.instances = []

    def compile(self, ttFont):
        """Serialize the table: header, axis records, instance records."""
        # Each instance record carries one 16.16 coordinate (4 bytes) per
        # axis; a trailing postScriptNameID is included only when at least
        # one instance actually defines one (0xFFFF means "unset").
        instanceSize = sstruct.calcsize(FVAR_INSTANCE_FORMAT) + (len(self.axes) * 4)
        includePostScriptNames = any(instance.postscriptNameID != 0xFFFF
                                     for instance in self.instances)
        if includePostScriptNames:
            instanceSize += 2
        header = {
            "version": 0x00010000,
            "offsetToData": sstruct.calcsize(FVAR_HEADER_FORMAT),
            "countSizePairs": 2,
            "axisCount": len(self.axes),
            "axisSize": sstruct.calcsize(FVAR_AXIS_FORMAT),
            "instanceCount": len(self.instances),
            "instanceSize": instanceSize,
        }
        result = [sstruct.pack(FVAR_HEADER_FORMAT, header)]
        result.extend(axis.compile() for axis in self.axes)
        axisTags = [axis.axisTag for axis in self.axes]
        for instance in self.instances:
            result.append(instance.compile(axisTags, includePostScriptNames))
        return bytesjoin(result)

    def decompile(self, data, ttFont):
        """Parse the binary table into Axis and NamedInstance objects.

        Raises:
            TTLibError: if the table version is not 1.0.
        """
        headerSize = sstruct.calcsize(FVAR_HEADER_FORMAT)
        # Fix: removed a dead ``header = {}`` assignment that was
        # immediately overwritten by the unpack below.
        header = sstruct.unpack(FVAR_HEADER_FORMAT, data[0:headerSize])
        if header["version"] != 0x00010000:
            raise TTLibError("unsupported 'fvar' version %04x" % header["version"])
        pos = header["offsetToData"]
        axisSize = header["axisSize"]
        for _ in range(header["axisCount"]):
            axis = Axis()
            axis.decompile(data[pos:pos+axisSize])
            self.axes.append(axis)
            pos += axisSize
        instanceSize = header["instanceSize"]
        axisTags = [axis.axisTag for axis in self.axes]
        for _ in range(header["instanceCount"]):
            instance = NamedInstance()
            instance.decompile(data[pos:pos+instanceSize], axisTags)
            self.instances.append(instance)
            pos += instanceSize

    def toXML(self, writer, ttFont):
        """Write all axes, then all named instances, as XML."""
        for axis in self.axes:
            axis.toXML(writer, ttFont)
        for instance in self.instances:
            instance.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild an axis or named instance from its XML element."""
        if name == "Axis":
            axis = Axis()
            axis.fromXML(name, attrs, content, ttFont)
            self.axes.append(axis)
        elif name == "NamedInstance":
            instance = NamedInstance()
            instance.fromXML(name, attrs, content, ttFont)
            self.instances.append(instance)
class Axis(object):
    """One variation axis of an 'fvar' table."""

    def __init__(self):
        self.axisTag = None
        self.axisNameID = 0
        self.flags = 0
        self.minValue = -1.0
        self.defaultValue = 0.0
        self.maxValue = 1.0

    def compile(self):
        """Pack this axis record into bytes."""
        return sstruct.pack(FVAR_AXIS_FORMAT, self)

    def decompile(self, data):
        """Fill this axis record from packed bytes."""
        sstruct.unpack2(FVAR_AXIS_FORMAT, data, self)

    def toXML(self, writer, ttFont):
        """Write this axis as XML, preceded by its debug name (if any)
        as a comment."""
        debugName = ttFont["name"].getDebugName(self.axisNameID)
        if debugName is not None:
            writer.newline()
            writer.comment(debugName)
            writer.newline()
        writer.begintag("Axis")
        writer.newline()
        fields = (
            ("AxisTag", self.axisTag),
            ("Flags", "0x%X" % self.flags),
            ("MinValue", fl2str(self.minValue, 16)),
            ("DefaultValue", fl2str(self.defaultValue, 16)),
            ("MaxValue", fl2str(self.maxValue, 16)),
            ("AxisNameID", str(self.axisNameID)),
        )
        for elementName, elementValue in fields:
            writer.begintag(elementName)
            writer.write(elementValue)
            writer.endtag(elementName)
            writer.newline()
        writer.endtag("Axis")
        writer.newline()

    def fromXML(self, name, _attrs, content, ttFont):
        """Fill this axis from its XML element."""
        assert name == "Axis"
        scalarFields = {"Flags", "MinValue", "DefaultValue", "MaxValue",
                        "AxisNameID"}
        for elementName, _, elementContent in (
                el for el in content if isinstance(el, tuple)):
            text = ''.join(elementContent)
            if elementName == "AxisTag":
                self.axisTag = Tag(text)
            elif elementName in scalarFields:
                # "MinValue" -> "minValue" etc.; *Value fields are 16.16
                # fixed-point strings, the rest are plain literals.
                attrName = elementName[0].lower() + elementName[1:]
                if elementName.endswith("Value"):
                    parsed = str2fl(text, 16)
                else:
                    parsed = safeEval(text)
                setattr(self, attrName, parsed)
class NamedInstance(object):
    """One named instance ("preset") of an 'fvar' table: a designspace
    location plus name-table IDs identifying it."""

    def __init__(self):
        self.subfamilyNameID = 0
        # 0xFFFF is the spec's "no PostScript name" sentinel.
        self.postscriptNameID = 0xFFFF
        self.flags = 0
        self.coordinates = {}

    def compile(self, axisTags, includePostScriptName):
        """Pack this instance: fixed-size prefix, one signed 16.16
        coordinate per axis (in axisTags order), then an optional
        postscriptNameID."""
        result = [sstruct.pack(FVAR_INSTANCE_FORMAT, self)]
        for axis in axisTags:
            fixedCoord = fl2fi(self.coordinates[axis], 16)
            result.append(struct.pack(">l", fixedCoord))
        if includePostScriptName:
            result.append(struct.pack(">H", self.postscriptNameID))
        return bytesjoin(result)

    def decompile(self, data, axisTags):
        """Unpack an instance record; a trailing postscriptNameID is read
        only when the record is long enough to contain one."""
        sstruct.unpack2(FVAR_INSTANCE_FORMAT, data, self)
        pos = sstruct.calcsize(FVAR_INSTANCE_FORMAT)
        for axis in axisTags:
            value = struct.unpack(">l", data[pos : pos + 4])[0]
            self.coordinates[axis] = fi2fl(value, 16)
            pos += 4
        if pos + 2 <= len(data):
            self.postscriptNameID = struct.unpack(">H", data[pos : pos + 2])[0]
        else:
            self.postscriptNameID = 0xFFFF

    def toXML(self, writer, ttFont):
        """Write this instance as XML; any name-table debug names are
        emitted first as comments."""
        name = ttFont["name"].getDebugName(self.subfamilyNameID)
        if name is not None:
            writer.newline()
            writer.comment(name)
            writer.newline()
        psname = ttFont["name"].getDebugName(self.postscriptNameID)
        if psname is not None:
            writer.comment(u"PostScript: " + psname)
            writer.newline()
        # Only emit postscriptNameID when it is actually set.
        if self.postscriptNameID == 0xFFFF:
            writer.begintag("NamedInstance", flags=("0x%X" % self.flags),
                            subfamilyNameID=self.subfamilyNameID)
        else:
            writer.begintag("NamedInstance", flags=("0x%X" % self.flags),
                            subfamilyNameID=self.subfamilyNameID,
                            postscriptNameID=self.postscriptNameID, )
        writer.newline()
        # Coordinates are written in the font's axis order, not dict order.
        for axis in ttFont["fvar"].axes:
            writer.simpletag("coord", axis=axis.axisTag,
                             value=fl2str(self.coordinates[axis.axisTag], 16))
            writer.newline()
        writer.endtag("NamedInstance")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Fill this instance from its XML element."""
        assert(name == "NamedInstance")
        self.subfamilyNameID = safeEval(attrs["subfamilyNameID"])
        self.flags = safeEval(attrs.get("flags", "0"))
        if "postscriptNameID" in attrs:
            self.postscriptNameID = safeEval(attrs["postscriptNameID"])
        else:
            self.postscriptNameID = 0xFFFF
        for tag, elementAttrs, _ in filter(lambda t: type(t) is tuple, content):
            if tag == "coord":
                value = str2fl(elementAttrs["value"], 16)
                self.coordinates[elementAttrs["axis"]] = value
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_f_v_a_r.py",
"copies": "5",
"size": "8289",
"license": "apache-2.0",
"hash": 5698246678535751000,
"line_mean": 35.3552631579,
"line_max": 84,
"alpha_frac": 0.5732899023,
"autogenerated": false,
"ratio": 3.8733644859813086,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6946654388281309,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import Tag, bytesjoin, strjoin
try:
import xattr
except ImportError:
xattr = None
def _reverseString(s):
    """Return *s* with its characters in reverse order."""
    # A slice replaces the old list()/reverse()/strjoin() dance and needs
    # no py23 helper.
    return s[::-1]
def getMacCreatorAndType(path):
    """Returns file creator and file type codes for a path.

    Args:
        path (str): A file path.

    Returns:
        A tuple of two :py:class:`fontTools.py23.Tag` objects, the first
        representing the file creator and the second representing the
        file type. Both are ``None`` when the ``xattr`` module is missing
        or the attribute cannot be read.
    """
    if xattr is None:
        return None, None
    try:
        finderInfo = xattr.getxattr(path, 'com.apple.FinderInfo')
    except (KeyError, IOError):
        return None, None
    # FinderInfo layout: bytes 0-3 are the type code, 4-7 the creator.
    return Tag(finderInfo[4:8]), Tag(finderInfo[:4])
def setMacCreatorAndType(path, fileCreator, fileType):
    """Set file creator and file type codes for a path.

    Note that if the ``xattr`` module is not installed, no action is
    taken but no error is raised.

    Args:
        path (str): A file path.
        fileCreator: A four-character file creator tag.
        fileType: A four-character file type tag.

    Raises:
        TypeError: if either tag is not exactly four characters long.
    """
    if xattr is None:
        return
    from fontTools.misc.textTools import pad
    for code in (fileCreator, fileType):
        if len(code) != 4:
            raise TypeError('arg must be string of 4 chars')
    # FinderInfo stores type first, then creator, padded to 32 bytes.
    finderInfo = pad(bytesjoin([fileType, fileCreator]), 32)
    xattr.setxattr(path, 'com.apple.FinderInfo', finderInfo)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/misc/macCreatorType.py",
"copies": "5",
"size": "1364",
"license": "apache-2.0",
"hash": 5437882595333263000,
"line_mean": 24.2592592593,
"line_max": 66,
"alpha_frac": 0.7170087977,
"autogenerated": false,
"ratio": 3.142857142857143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6359865940557143,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import Tag
from fontTools.ttLib import getClassTag
class DefaultTable(object):
    """Generic sfnt table: keeps the raw bytes and round-trips them as a
    hexdata dump in XML. Concrete table classes override (de)compile."""

    dependencies = []

    def __init__(self, tag=None):
        if tag is None:
            # Derive the table tag from the class name convention.
            tag = getClassTag(self.__class__)
        self.tableTag = Tag(tag)

    def decompile(self, data, ttFont):
        # No parsing here: hold on to the raw table bytes verbatim.
        self.data = data

    def compile(self, ttFont):
        return self.data

    def toXML(self, writer, ttFont, **kwargs):
        if hasattr(self, "ERROR"):
            # Surface a decompilation failure recorded on the instance.
            for line in ("An error occurred during the decompilation of this table",
                         self.ERROR):
                writer.comment(line)
                writer.newline()
        writer.begintag("hexdata")
        writer.newline()
        writer.dumphex(self.compile(ttFont))
        writer.endtag("hexdata")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        from fontTools.misc.textTools import readHex
        from fontTools import ttLib
        if name != "hexdata":
            raise ttLib.TTLibError("can't handle '%s' element" % name)
        self.decompile(readHex(content), ttFont)

    def __repr__(self):
        return "<'%s' table at %x>" % (self.tableTag, id(self))

    def __eq__(self, other):
        if type(other) != type(self):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        equal = self.__eq__(other)
        if equal is NotImplemented:
            return equal
        return not equal
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/DefaultTable.py",
"copies": "5",
"size": "1277",
"license": "apache-2.0",
"hash": -5388092236058260000,
"line_mean": 25.6041666667,
"line_max": 77,
"alpha_frac": 0.6906812843,
"autogenerated": false,
"ratio": 3.1299019607843137,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6320583245084314,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import Tag, tostr
from fontTools.misc import sstruct
from fontTools.misc.textTools import binary2num, safeEval
from fontTools.feaLib.error import FeatureLibError
from fontTools.feaLib.lookupDebugInfo import (
LookupDebugInfo,
LOOKUP_DEBUG_INFO_KEY,
LOOKUP_DEBUG_ENV_VAR,
)
from fontTools.feaLib.parser import Parser
from fontTools.feaLib.ast import FeatureFile
from fontTools.otlLib import builder as otl
from fontTools.otlLib.maxContextCalc import maxCtxFont
from fontTools.ttLib import newTable, getTableModule
from fontTools.ttLib.tables import otBase, otTables
from fontTools.otlLib.builder import (
AlternateSubstBuilder,
ChainContextPosBuilder,
ChainContextSubstBuilder,
LigatureSubstBuilder,
MultipleSubstBuilder,
CursivePosBuilder,
MarkBasePosBuilder,
MarkLigPosBuilder,
MarkMarkPosBuilder,
ReverseChainSingleSubstBuilder,
SingleSubstBuilder,
ClassPairPosSubtableBuilder,
PairPosBuilder,
SinglePosBuilder,
ChainContextualRule,
)
from fontTools.otlLib.error import OpenTypeLibError
from collections import defaultdict
import itertools
from io import StringIO
import logging
import warnings
import os
# Module-level logger shared by the feature-file compiler.
log = logging.getLogger(__name__)
def addOpenTypeFeatures(font, featurefile, tables=None, debug=False):
    """Compile *featurefile* into *font*, replacing any existing features.

    Args:
        font (feaLib.ttLib.TTFont): The font object.
        featurefile: A path or file object (parsed into an AST), or a
            pre-parsed AST instance.
        tables: Optional list restricting which tables are affected.
        debug: Whether to record source debugging information in the
            ``Debg`` table.
    """
    Builder(font, featurefile).build(tables=tables, debug=debug)
def addOpenTypeFeaturesFromString(
    font, features, filename=None, tables=None, debug=False
):
    """Compile feature code given as a string into *font*, replacing any
    existing features.

    Args:
        font (feaLib.ttLib.TTFont): The font object.
        features: A string containing feature code.
        filename: Its directory anchors relative ``include()`` paths;
            the current directory is assumed when ``None``.
        tables: Optional list restricting which tables are affected.
        debug: Whether to record source debugging information in the
            ``Debg`` table.
    """
    buffer = StringIO(tostr(features))
    if filename:
        buffer.name = filename
    addOpenTypeFeatures(font, buffer, tables=tables, debug=debug)
class Builder(object):
# Output tables the feature compiler knows how to build; build() raises
# NotImplementedError for anything requested outside this set.
supportedTables = frozenset(
    Tag(tag)
    for tag in [
        "BASE",
        "GDEF",
        "GPOS",
        "GSUB",
        "OS/2",
        "head",
        "hhea",
        "name",
        "vhea",
        "STAT",
    ]
)
def __init__(self, font, featurefile):
    """Set up compiler state for building *font* from *featurefile*.

    *featurefile* may be a path/file object (parsed lazily in build())
    or a pre-parsed FeatureFile AST.
    """
    self.font = font
    # 'featurefile' can be either a path or file object (in which case we
    # parse it into an AST), or a pre-parsed AST instance
    if isinstance(featurefile, FeatureFile):
        self.parseTree, self.file = featurefile, None
    else:
        self.parseTree, self.file = None, featurefile
    self.glyphMap = font.getReverseGlyphMap()
    self.default_language_systems_ = set()
    self.script_ = None
    self.lookupflag_ = 0
    self.lookupflag_markFilterSet_ = None
    self.language_systems = set()
    self.seen_non_DFLT_script_ = False
    self.named_lookups_ = {}
    # Current lookup/feature being built while walking the AST.
    self.cur_lookup_ = None
    self.cur_lookup_name_ = None
    self.cur_feature_name_ = None
    self.lookups_ = []
    self.lookup_locations = {"GSUB": {}, "GPOS": {}}
    self.features_ = {}  # ('latn', 'DEU ', 'smcp') --> [LookupBuilder*]
    self.required_features_ = {}  # ('latn', 'DEU ') --> 'scmp'
    # for feature 'aalt'
    self.aalt_features_ = []  # [(location, featureName)*], for 'aalt'
    self.aalt_location_ = None
    self.aalt_alternates_ = {}
    # for 'featureNames'
    self.featureNames_ = set()
    self.featureNames_ids_ = {}
    # for 'cvParameters'
    self.cv_parameters_ = set()
    self.cv_parameters_ids_ = {}
    self.cv_num_named_params_ = {}
    self.cv_characters_ = defaultdict(list)
    # for feature 'size'
    self.size_parameters_ = None
    # for table 'head'
    self.fontRevision_ = None  # 2.71
    # for table 'name'
    self.names_ = []
    # for table 'BASE'
    self.base_horiz_axis_ = None
    self.base_vert_axis_ = None
    # for table 'GDEF'
    self.attachPoints_ = {}  # "a" --> {3, 7}
    self.ligCaretCoords_ = {}  # "f_f_i" --> {300, 600}
    self.ligCaretPoints_ = {}  # "f_f_i" --> {3, 7}
    self.glyphClassDefs_ = {}  # "fi" --> (2, (file, line, column))
    self.markAttach_ = {}  # "acute" --> (4, (file, line, column))
    self.markAttachClassID_ = {}  # frozenset({"acute", "grave"}) --> 4
    self.markFilterSets_ = {}  # frozenset({"acute", "grave"}) --> 4
    # for table 'OS/2'
    self.os2_ = {}
    # for table 'hhea'
    self.hhea_ = {}
    # for table 'vhea'
    self.vhea_ = {}
    # for table 'STAT'
    self.stat_ = {}
def build(self, tables=None, debug=False):
    """Compile the parsed feature file into the font's OpenType tables.

    Args:
        tables: optional iterable restricting which supported tables get
            built; defaults to all of them.
        debug: when true (or the debug env var is set), also write lookup
            source locations into a 'Debg' table.

    Raises:
        NotImplementedError: if *tables* names an unsupported table.
    """
    if self.parseTree is None:
        self.parseTree = Parser(self.file, self.glyphMap).parse()
    self.parseTree.build(self)
    # by default, build all the supported tables
    if tables is None:
        tables = self.supportedTables
    else:
        tables = frozenset(tables)
        unsupported = tables - self.supportedTables
        if unsupported:
            unsupported_string = ", ".join(sorted(unsupported))
            raise NotImplementedError(
                "The following tables were requested but are unsupported: "
                f"{unsupported_string}."
            )
    if "GSUB" in tables:
        self.build_feature_aalt_()
    if "head" in tables:
        self.build_head()
    if "hhea" in tables:
        self.build_hhea()
    if "vhea" in tables:
        self.build_vhea()
    if "name" in tables:
        self.build_name()
    if "OS/2" in tables:
        self.build_OS_2()
    if "STAT" in tables:
        self.build_STAT()
    for tag in ("GPOS", "GSUB"):
        if tag not in tables:
            continue
        table = self.makeTable(tag)
        # Install GPOS/GSUB only when non-empty; otherwise drop any
        # stale table left over in the font.
        if (
            table.ScriptList.ScriptCount > 0
            or table.FeatureList.FeatureCount > 0
            or table.LookupList.LookupCount > 0
        ):
            fontTable = self.font[tag] = newTable(tag)
            fontTable.table = table
        elif tag in self.font:
            del self.font[tag]
    if any(tag in self.font for tag in ("GPOS", "GSUB")) and "OS/2" in self.font:
        self.font["OS/2"].usMaxContext = maxCtxFont(self.font)
    if "GDEF" in tables:
        gdef = self.buildGDEF()
        if gdef:
            self.font["GDEF"] = gdef
        elif "GDEF" in self.font:
            del self.font["GDEF"]
    if "BASE" in tables:
        base = self.buildBASE()
        if base:
            self.font["BASE"] = base
        elif "BASE" in self.font:
            del self.font["BASE"]
    if debug or os.environ.get(LOOKUP_DEBUG_ENV_VAR):
        self.buildDebg()
def get_chained_lookup_(self, location, builder_class):
    """Create a fresh lookup builder carrying the current lookup flags,
    register it, and return it."""
    lookup = builder_class(self.font, location)
    lookup.lookupflag = self.lookupflag_
    lookup.markFilterSet = self.lookupflag_markFilterSet_
    self.lookups_.append(lookup)
    return lookup
def add_lookup_to_feature_(self, lookup, feature_name):
    """Register *lookup* under *feature_name* for every active
    (script, language) system."""
    for script, lang in self.language_systems:
        bucket = self.features_.setdefault((script, lang, feature_name), [])
        bucket.append(lookup)
def get_lookup_(self, location, builder_class):
    """Return the current lookup builder if compatible, else open a new one.

    A lookup can be reused only while the builder class and the active
    lookup flags stay the same; inside a named lookup block a change is
    an error rather than a new lookup.
    """
    if (
        self.cur_lookup_
        and type(self.cur_lookup_) == builder_class
        and self.cur_lookup_.lookupflag == self.lookupflag_
        and self.cur_lookup_.markFilterSet == self.lookupflag_markFilterSet_
    ):
        return self.cur_lookup_
    if self.cur_lookup_name_ and self.cur_lookup_:
        raise FeatureLibError(
            "Within a named lookup block, all rules must be of "
            "the same lookup type and flag",
            location,
        )
    self.cur_lookup_ = builder_class(self.font, location)
    self.cur_lookup_.lookupflag = self.lookupflag_
    self.cur_lookup_.markFilterSet = self.lookupflag_markFilterSet_
    self.lookups_.append(self.cur_lookup_)
    if self.cur_lookup_name_:
        # We are starting a lookup rule inside a named lookup block.
        self.named_lookups_[self.cur_lookup_name_] = self.cur_lookup_
    if self.cur_feature_name_:
        # We are starting a lookup rule inside a feature. This includes
        # lookup rules inside named lookups inside features.
        self.add_lookup_to_feature_(self.cur_lookup_, self.cur_feature_name_)
    return self.cur_lookup_
def build_feature_aalt_(self):
    """Synthesize the 'aalt' feature from collected alternates plus the
    alternate glyphs of every feature it references."""
    if not self.aalt_features_ and not self.aalt_alternates_:
        return
    alternates = {g: set(a) for g, a in self.aalt_alternates_.items()}
    for location, name in self.aalt_features_ + [(None, "aalt")]:
        feature = [
            (script, lang, feature, lookups)
            for (script, lang, feature), lookups in self.features_.items()
            if feature == name
        ]
        # "aalt" does not have to specify its own lookups, but it might.
        if not feature and name != "aalt":
            raise FeatureLibError(
                "Feature %s has not been defined" % name, location
            )
        for script, lang, feature, lookups in feature:
            for lookuplist in lookups:
                if not isinstance(lookuplist, list):
                    lookuplist = [lookuplist]
                for lookup in lookuplist:
                    for glyph, alts in lookup.getAlternateGlyphs().items():
                        alternates.setdefault(glyph, set()).update(alts)
    # Glyphs with exactly one alternate become a single substitution;
    # the rest become an alternate substitution.
    single = {
        glyph: list(repl)[0] for glyph, repl in alternates.items() if len(repl) == 1
    }
    # TODO: Figure out the glyph alternate ordering used by makeotf.
    # https://github.com/fonttools/fonttools/issues/836
    multi = {
        glyph: sorted(repl, key=self.font.getGlyphID)
        for glyph, repl in alternates.items()
        if len(repl) > 1
    }
    if not single and not multi:
        return
    self.features_ = {
        (script, lang, feature): lookups
        for (script, lang, feature), lookups in self.features_.items()
        if feature != "aalt"
    }
    old_lookups = self.lookups_
    self.lookups_ = []
    self.start_feature(self.aalt_location_, "aalt")
    # NOTE(review): 'location' below is the loop variable left over from
    # the iteration above (last value None, from the synthetic "aalt"
    # entry) -- looks intentional, but worth confirming.
    if single:
        single_lookup = self.get_lookup_(location, SingleSubstBuilder)
        single_lookup.mapping = single
    if multi:
        multi_lookup = self.get_lookup_(location, AlternateSubstBuilder)
        multi_lookup.alternates = multi
    self.end_feature()
    self.lookups_.extend(old_lookups)
def build_head(self):
    """Apply a FontRevision statement to the 'head' table, if one was
    collected from the feature file."""
    if not self.fontRevision_:
        return
    head = self.font.get("head")
    if not head:  # this only happens for unit tests
        head = self.font["head"] = newTable("head")
        head.decompile(b"\0" * 54, self.font)
        head.tableVersion = 1.0
        head.created = head.modified = 3406620153  # 2011-12-13 11:22:33
    head.fontRevision = self.fontRevision_
def build_hhea(self):
    """Copy hhea statements collected from the feature file into the
    'hhea' table."""
    if not self.hhea_:
        return
    hhea = self.font.get("hhea")
    if not hhea:  # this only happens for unit tests
        hhea = self.font["hhea"] = newTable("hhea")
        hhea.decompile(b"\0" * 36, self.font)
        hhea.tableVersion = 0x00010000
    # Map feature-file keys to hhea attribute names, in the original
    # assignment order.
    for key, attr in (("caretoffset", "caretOffset"),
                      ("ascender", "ascent"),
                      ("descender", "descent"),
                      ("linegap", "lineGap")):
        if key in self.hhea_:
            setattr(hhea, attr, self.hhea_[key])
def build_vhea(self):
    """Copy vhea statements collected from the feature file into the
    'vhea' table."""
    if not self.vhea_:
        return
    vhea = self.font.get("vhea")
    if not vhea:  # this only happens for unit tests
        vhea = self.font["vhea"] = newTable("vhea")
        vhea.decompile(b"\0" * 36, self.font)
        vhea.tableVersion = 0x00011000
    # Map feature-file keys to vhea attribute names, in the original
    # assignment order.
    for key, attr in (("verttypoascender", "ascent"),
                      ("verttypodescender", "descent"),
                      ("verttypolinegap", "lineGap")):
        if key in self.vhea_:
            setattr(vhea, attr, self.vhea_[key])
def get_user_name_id(self, table):
    """Return the first unused font-specific name ID (256..32766) in
    *table*, or None if every candidate is taken."""
    used = {record.nameID for record in table.names}
    return next(
        (candidate for candidate in range(256, 32767) if candidate not in used),
        None,
    )
def buildFeatureParams(self, tag):
    """Build the FeatureParams subtable for feature *tag*, or return None.

    Handles 'size', stylistic sets registered via featureNames, and
    character variants with registered cvParameters.
    """
    params = None
    if tag == "size":
        params = otTables.FeatureParamsSize()
        (
            params.DesignSize,
            params.SubfamilyID,
            params.RangeStart,
            params.RangeEnd,
        ) = self.size_parameters_
        if tag in self.featureNames_ids_:
            params.SubfamilyNameID = self.featureNames_ids_[tag]
        else:
            params.SubfamilyNameID = 0
    elif tag in self.featureNames_:
        if not self.featureNames_ids_:
            # name table wasn't selected among the tables to build; skip
            pass
        else:
            assert tag in self.featureNames_ids_
            params = otTables.FeatureParamsStylisticSet()
            params.Version = 0
            params.UINameID = self.featureNames_ids_[tag]
    elif tag in self.cv_parameters_:
        params = otTables.FeatureParamsCharacterVariants()
        params.Format = 0
        # Name IDs default to 0 when the corresponding block was absent.
        params.FeatUILabelNameID = self.cv_parameters_ids_.get(
            (tag, "FeatUILabelNameID"), 0
        )
        params.FeatUITooltipTextNameID = self.cv_parameters_ids_.get(
            (tag, "FeatUITooltipTextNameID"), 0
        )
        params.SampleTextNameID = self.cv_parameters_ids_.get(
            (tag, "SampleTextNameID"), 0
        )
        params.NumNamedParameters = self.cv_num_named_params_.get(tag, 0)
        params.FirstParamUILabelNameID = self.cv_parameters_ids_.get(
            (tag, "ParamUILabelNameID_0"), 0
        )
        params.CharCount = len(self.cv_characters_[tag])
        params.Character = self.cv_characters_[tag]
    return params
def build_name(self):
    """Write collected name records into the 'name' table, assigning
    fresh font-specific name IDs to featureNames/cvParameters entries."""
    if not self.names_:
        return
    table = self.font.get("name")
    if not table:  # this only happens for unit tests
        table = self.font["name"] = newTable("name")
        table.names = []
    for name in self.names_:
        nameID, platformID, platEncID, langID, string = name
        # For featureNames block, nameID is 'feature tag'
        # For cvParameters blocks, nameID is ('feature tag', 'block name')
        if not isinstance(nameID, int):
            tag = nameID
            if tag in self.featureNames_:
                if tag not in self.featureNames_ids_:
                    self.featureNames_ids_[tag] = self.get_user_name_id(table)
                    assert self.featureNames_ids_[tag] is not None
                nameID = self.featureNames_ids_[tag]
            elif tag[0] in self.cv_parameters_:
                if tag not in self.cv_parameters_ids_:
                    self.cv_parameters_ids_[tag] = self.get_user_name_id(table)
                    assert self.cv_parameters_ids_[tag] is not None
                nameID = self.cv_parameters_ids_[tag]
        table.setName(string, nameID, platformID, platEncID, langID)
def build_OS_2(self):
    """Copy OS/2 statements from the feature file into the 'OS/2' table,
    bumping the table version when higher-version fields are used."""
    if not self.os2_:
        return
    table = self.font.get("OS/2")
    if not table:  # this only happens for unit tests
        table = self.font["OS/2"] = newTable("OS/2")
        data = b"\0" * sstruct.calcsize(getTableModule("OS/2").OS2_format_0)
        table.decompile(data, self.font)
    version = 0
    if "fstype" in self.os2_:
        table.fsType = self.os2_["fstype"]
    if "panose" in self.os2_:
        panose = getTableModule("OS/2").Panose()
        (
            panose.bFamilyType,
            panose.bSerifStyle,
            panose.bWeight,
            panose.bProportion,
            panose.bContrast,
            panose.bStrokeVariation,
            panose.bArmStyle,
            panose.bLetterForm,
            panose.bMidline,
            panose.bXHeight,
        ) = self.os2_["panose"]
        table.panose = panose
    if "typoascender" in self.os2_:
        table.sTypoAscender = self.os2_["typoascender"]
    if "typodescender" in self.os2_:
        table.sTypoDescender = self.os2_["typodescender"]
    if "typolinegap" in self.os2_:
        table.sTypoLineGap = self.os2_["typolinegap"]
    if "winascent" in self.os2_:
        table.usWinAscent = self.os2_["winascent"]
    if "windescent" in self.os2_:
        table.usWinDescent = self.os2_["windescent"]
    if "vendor" in self.os2_:
        table.achVendID = safeEval("'''" + self.os2_["vendor"] + "'''")
    if "weightclass" in self.os2_:
        table.usWeightClass = self.os2_["weightclass"]
    if "widthclass" in self.os2_:
        table.usWidthClass = self.os2_["widthclass"]
    if "unicoderange" in self.os2_:
        table.setUnicodeRanges(self.os2_["unicoderange"])
    if "codepagerange" in self.os2_:
        pages = self.build_codepages_(self.os2_["codepagerange"])
        table.ulCodePageRange1, table.ulCodePageRange2 = pages
        version = 1
    if "xheight" in self.os2_:
        table.sxHeight = self.os2_["xheight"]
        version = 2
    if "capheight" in self.os2_:
        table.sCapHeight = self.os2_["capheight"]
        version = 2
    if "loweropsize" in self.os2_:
        table.usLowerOpticalPointSize = self.os2_["loweropsize"]
        version = 5
    if "upperopsize" in self.os2_:
        table.usUpperOpticalPointSize = self.os2_["upperopsize"]
        version = 5

    def checkattr(table, attrs):
        # Make sure version-dependent attributes exist so compile()
        # doesn't fail on the zero-initialized test tables.
        for attr in attrs:
            if not hasattr(table, attr):
                setattr(table, attr, 0)

    table.version = max(version, table.version)
    # this only happens for unit tests
    if version >= 1:
        checkattr(table, ("ulCodePageRange1", "ulCodePageRange2"))
    if version >= 2:
        checkattr(
            table,
            (
                "sxHeight",
                "sCapHeight",
                "usDefaultChar",
                "usBreakChar",
                "usMaxContext",
            ),
        )
    if version >= 5:
        checkattr(table, ("usLowerOpticalPointSize", "usUpperOpticalPointSize"))
def setElidedFallbackName(self, value, location):
    """Record the STAT elided-fallback name; only one form is allowed.

    An int stores ElidedFallbackNameID, a list stores ElidedFallbackName;
    setting either twice (in any combination) is an error.
    """
    # ElidedFallbackName is a convenience for setting ElidedFallbackNameID,
    # so the two are mutually exclusive.
    for token in ("ElidedFallbackName", "ElidedFallbackNameID"):
        if token in self.stat_:
            raise FeatureLibError(
                f"{token} is already set.",
                location,
            )
    if isinstance(value, int):
        key = "ElidedFallbackNameID"
    elif isinstance(value, list):
        key = "ElidedFallbackName"
    else:
        raise AssertionError(value)
    self.stat_[key] = value
def addDesignAxis(self, designAxis, location):
    """Append a STAT DesignAxis, rejecting duplicate tags and duplicate
    axis order numbers."""
    axes = self.stat_.setdefault("DesignAxes", [])
    if any(existing.tag == designAxis.tag for existing in axes):
        raise FeatureLibError(
            f'DesignAxis already defined for tag "{designAxis.tag}".',
            location,
        )
    if any(existing.axisOrder == designAxis.axisOrder for existing in axes):
        raise FeatureLibError(
            f"DesignAxis already defined for axis number {designAxis.axisOrder}.",
            location,
        )
    axes.append(designAxis)
def addAxisValueRecord(self, axisValueRecord, location):
    """Append a STAT AxisValueRecord, rejecting exact duplicates.

    Two records count as duplicates when their names, locations and
    flags all match (compared via their feature-file source form).
    """
    if "AxisValueRecords" not in self.stat_:
        self.stat_["AxisValueRecords"] = []
    # Check for duplicate AxisValueRecords
    for record_ in self.stat_["AxisValueRecords"]:
        if (
            {n.asFea() for n in record_.names}
            == {n.asFea() for n in axisValueRecord.names}
            and {n.asFea() for n in record_.locations}
            == {n.asFea() for n in axisValueRecord.locations}
            and record_.flags == axisValueRecord.flags
        ):
            raise FeatureLibError(
                "An AxisValueRecord with these values is already defined.",
                location,
            )
    self.stat_["AxisValueRecords"].append(axisValueRecord)
def build_STAT(self):
    """Build the 'STAT' table from the collected DesignAxes,
    AxisValueRecords and elided-fallback name.

    Raises:
        FeatureLibError: if no DesignAxes were defined, or if
            ElidedFallbackNameID points at a missing name record.
    """
    if not self.stat_:
        return
    axes = self.stat_.get("DesignAxes")
    if not axes:
        raise FeatureLibError("DesignAxes not defined", None)
    axisValueRecords = self.stat_.get("AxisValueRecords")
    axisValues = {}
    format4_locations = []
    for tag in axes:
        axisValues[tag.tag] = []
    if axisValueRecords is not None:
        for avr in axisValueRecords:
            valuesDict = {}
            if avr.flags > 0:
                valuesDict["flags"] = avr.flags
            if len(avr.locations) == 1:
                # Single-axis record: STAT format 1, 3 or 2 depending on
                # how many values the location carries.
                location = avr.locations[0]
                values = location.values
                if len(values) == 1:  # format1
                    valuesDict.update({"value": values[0], "name": avr.names})
                if len(values) == 2:  # format3
                    valuesDict.update(
                        {
                            "value": values[0],
                            "linkedValue": values[1],
                            "name": avr.names,
                        }
                    )
                if len(values) == 3:  # format2
                    nominal, minVal, maxVal = values
                    valuesDict.update(
                        {
                            "nominalValue": nominal,
                            "rangeMinValue": minVal,
                            "rangeMaxValue": maxVal,
                            "name": avr.names,
                        }
                    )
                axisValues[location.tag].append(valuesDict)
            else:
                # Multi-axis record: STAT format 4.
                valuesDict.update(
                    {
                        "location": {i.tag: i.values[0] for i in avr.locations},
                        "name": avr.names,
                    }
                )
                format4_locations.append(valuesDict)
    designAxes = [
        {
            "ordering": a.axisOrder,
            "tag": a.tag,
            "name": a.names,
            "values": axisValues[a.tag],
        }
        for a in axes
    ]
    nameTable = self.font.get("name")
    if not nameTable:  # this only happens for unit tests
        nameTable = self.font["name"] = newTable("name")
        nameTable.names = []
    if "ElidedFallbackNameID" in self.stat_:
        nameID = self.stat_["ElidedFallbackNameID"]
        name = nameTable.getDebugName(nameID)
        if not name:
            raise FeatureLibError(
                f"ElidedFallbackNameID {nameID} points "
                "to a nameID that does not exist in the "
                '"name" table',
                None,
            )
    elif "ElidedFallbackName" in self.stat_:
        nameID = self.stat_["ElidedFallbackName"]
    # NOTE(review): if neither Elided* key is present, nameID is unbound
    # here -- presumably the parser guarantees one is always set; confirm.
    otl.buildStatTable(
        self.font,
        designAxes,
        locations=format4_locations,
        elidedFallbackName=nameID,
    )
def build_codepages_(self, pages):
    """Translate OS/2 CodePageRange code-page numbers into the two
    32-bit ulCodePageRange bitmask values.

    Unknown code pages are silently ignored.
    """
    # Code page number -> bit position in the 64-bit range, per the
    # OS/2 ulCodePageRange definition.
    pages2bits = {
        1252: 0, 1250: 1, 1251: 2, 1253: 3, 1254: 4, 1255: 5,
        1256: 6, 1257: 7, 1258: 8, 874: 16, 932: 17, 936: 18,
        949: 19, 950: 20, 1361: 21, 869: 48, 866: 49, 865: 50,
        864: 51, 863: 52, 862: 53, 861: 54, 860: 55, 857: 56,
        855: 57, 852: 58, 775: 59, 737: 60, 708: 61, 850: 62,
        437: 63,
    }
    bits = {pages2bits[page] for page in pages if page in pages2bits}
    masks = []
    for word in range(2):
        value = 0
        for bit in range(word * 32, (word + 1) * 32):
            if bit in bits:
                value |= 1 << (bit - word * 32)
        masks.append(value)
    return masks
def buildBASE(self):
    """Build the 'BASE' table from collected baseline data, or return
    None when no baseline statements were seen."""
    if not (self.base_horiz_axis_ or self.base_vert_axis_):
        return None
    baseTable = otTables.BASE()
    baseTable.Version = 0x00010000
    baseTable.HorizAxis = self.buildBASEAxis(self.base_horiz_axis_)
    baseTable.VertAxis = self.buildBASEAxis(self.base_vert_axis_)
    wrapper = newTable("BASE")
    wrapper.table = baseTable
    return wrapper
def buildBASEAxis(self, axis):
    """Build one otTables.Axis (horizontal or vertical) for the BASE table.

    *axis* is a (baselineTags, scripts) pair as collected from the feature
    file; each script entry is (scriptTag, defaultBaselineTag, coordinates).
    """
    if not axis:
        return
    bases, scripts = axis
    axis = otTables.Axis()
    axis.BaseTagList = otTables.BaseTagList()
    axis.BaseTagList.BaselineTag = bases
    axis.BaseTagList.BaseTagCount = len(bases)
    axis.BaseScriptList = otTables.BaseScriptList()
    axis.BaseScriptList.BaseScriptRecord = []
    axis.BaseScriptList.BaseScriptCount = len(scripts)
    # Script records must be sorted by script tag.
    for script in sorted(scripts):
        record = otTables.BaseScriptRecord()
        record.BaseScriptTag = script[0]
        record.BaseScript = otTables.BaseScript()
        record.BaseScript.BaseLangSysCount = 0
        record.BaseScript.BaseValues = otTables.BaseValues()
        record.BaseScript.BaseValues.DefaultIndex = bases.index(script[1])
        record.BaseScript.BaseValues.BaseCoord = []
        record.BaseScript.BaseValues.BaseCoordCount = len(script[2])
        for c in script[2]:
            coord = otTables.BaseCoord()
            coord.Format = 1
            coord.Coordinate = c
            record.BaseScript.BaseValues.BaseCoord.append(coord)
        axis.BaseScriptList.BaseScriptRecord.append(record)
    return axis
def buildGDEF(self):
    """Assemble the GDEF table; return None when every subtable is empty."""
    gdef = otTables.GDEF()
    gdef.GlyphClassDef = self.buildGDEFGlyphClassDef_()
    gdef.AttachList = otl.buildAttachList(self.attachPoints_, self.glyphMap)
    gdef.LigCaretList = otl.buildLigCaretList(
        self.ligCaretCoords_, self.ligCaretPoints_, self.glyphMap
    )
    gdef.MarkAttachClassDef = self.buildGDEFMarkAttachClassDef_()
    gdef.MarkGlyphSetsDef = self.buildGDEFMarkGlyphSetsDef_()
    # Mark glyph sets require GDEF version 1.2.
    gdef.Version = 0x00010002 if gdef.MarkGlyphSetsDef else 0x00010000
    subtables = (
        gdef.GlyphClassDef,
        gdef.AttachList,
        gdef.LigCaretList,
        gdef.MarkAttachClassDef,
        gdef.MarkGlyphSetsDef,
    )
    if not any(subtables):
        return None
    wrapper = newTable("GDEF")
    wrapper.table = gdef
    return wrapper
def buildGDEFGlyphClassDef_(self):
    """Build GDEF GlyphClassDef from explicit definitions, or infer it
    from lookups and mark classes; None when empty."""
    if self.glyphClassDefs_:
        # Explicit GlyphClassDef statements win outright.
        classes = {glyph: cls for glyph, (cls, _) in self.glyphClassDefs_.items()}
    else:
        classes = {}
        for lookup in self.lookups_:
            classes.update(lookup.inferGlyphClasses())
        for markClass in self.parseTree.markClasses.values():
            for markClassDef in markClass.definitions:
                for glyph in markClassDef.glyphSet():
                    classes[glyph] = 3
    if not classes:
        return None
    result = otTables.GlyphClassDef()
    result.classDefs = classes
    return result
def buildGDEFMarkAttachClassDef_(self):
    """Build GDEF MarkAttachClassDef from MarkAttachmentType statements,
    or None when there are none."""
    classDefs = {glyph: cls for glyph, (cls, _) in self.markAttach_.items()}
    if not classDefs:
        return None
    markAttach = otTables.MarkAttachClassDef()
    markAttach.classDefs = classDefs
    return markAttach
def buildGDEFMarkGlyphSetsDef_(self):
    """Build the GDEF MarkGlyphSets subtable.

    The glyph sets are ordered by their filter-set index so that set N
    ends up at position N in the resulting table.
    """
    sets = [
        glyphs
        for glyphs, _ in sorted(self.markFilterSets_.items(), key=lambda item: item[1])
    ]
    return otl.buildMarkGlyphSetsDef(sets, self.glyphMap)
def buildDebg(self):
    """Stash the collected lookup debug locations into the font's Debg table."""
    if "Debg" not in self.font:
        debg = newTable("Debg")
        debg.data = {}
        self.font["Debg"] = debg
    self.font["Debg"].data[LOOKUP_DEBUG_INFO_KEY] = self.lookup_locations
def buildLookups_(self, tag):
    """Compile the lookups belonging to ``tag`` ("GPOS" or "GSUB").

    Assigns ``lookup_index`` to every builder for the requested table
    (and None to all others), records per-lookup debug info, and
    returns the list of compiled otTables lookups.
    """
    assert tag in ("GPOS", "GSUB"), tag
    # Builders that belong to the other table must carry None.
    for lookup in self.lookups_:
        lookup.lookup_index = None
    lookups = [lookup for lookup in self.lookups_ if lookup.table == tag]
    for index, lookup in enumerate(lookups):
        lookup.lookup_index = index
        self.lookup_locations[tag][str(index)] = LookupDebugInfo(
            location=str(lookup.location),
            name=self.get_lookup_name_(lookup),
            feature=None,
        )
    try:
        return [lookup.build() for lookup in lookups]
    except OpenTypeLibError as error:
        raise FeatureLibError(str(error), error.location) from error
def makeTable(self, tag):
    """Build the GSUB or GPOS table named ``tag`` from collected features.

    Compiles the lookups first, then assembles the FeatureList and the
    ScriptList, sharing one FeatureRecord between language systems that
    use the same (feature tag, lookup indices) combination.
    """
    table = getattr(otTables, tag, None)()
    table.Version = 0x00010000
    table.ScriptList = otTables.ScriptList()
    table.ScriptList.ScriptRecord = []
    table.FeatureList = otTables.FeatureList()
    table.FeatureList.FeatureRecord = []
    table.LookupList = otTables.LookupList()
    table.LookupList.Lookup = self.buildLookups_(tag)

    # Maps (feature_tag, lookup_indices) to its FeatureList index,
    # e.g. ('liga', (2, 3, 7)) --> 23.
    feature_indices = {}
    required_feature_indices = {}  # ('latn', 'DEU') --> 23
    scripts = {}  # 'latn' --> {'DEU': [23, 24]} for feature #23,24
    # Sort the feature table by feature tag:
    # https://github.com/fonttools/fonttools/issues/568
    sortFeatureTag = lambda f: (f[0][2], f[0][1], f[0][0], f[1])
    for key, lookups in sorted(self.features_.items(), key=sortFeatureTag):
        script, lang, feature_tag = key
        # l.lookup_index will be None when a lookup is not needed
        # for the table under construction. For example, substitution
        # rules will have no lookup_index while building GPOS tables.
        lookup_indices = tuple(
            l.lookup_index for l in lookups if l.lookup_index is not None
        )
        # A GPOS 'size' feature is kept even without lookups, because
        # all of its payload lives in FeatureParams.
        size_feature = tag == "GPOS" and feature_tag == "size"
        if len(lookup_indices) == 0 and not size_feature:
            continue

        for ix in lookup_indices:
            try:
                self.lookup_locations[tag][str(ix)] = self.lookup_locations[tag][
                    str(ix)
                ]._replace(feature=key)
            except KeyError:
                warnings.warn(
                    "feaLib.Builder subclass needs upgrading to "
                    "stash debug information. See fonttools#2065."
                )

        feature_key = (feature_tag, lookup_indices)
        feature_index = feature_indices.get(feature_key)
        if feature_index is None:
            feature_index = len(table.FeatureList.FeatureRecord)
            frec = otTables.FeatureRecord()
            frec.FeatureTag = feature_tag
            frec.Feature = otTables.Feature()
            frec.Feature.FeatureParams = self.buildFeatureParams(feature_tag)
            frec.Feature.LookupListIndex = list(lookup_indices)
            frec.Feature.LookupCount = len(lookup_indices)
            table.FeatureList.FeatureRecord.append(frec)
            feature_indices[feature_key] = feature_index
        scripts.setdefault(script, {}).setdefault(lang, []).append(feature_index)
        if self.required_features_.get((script, lang)) == feature_tag:
            required_feature_indices[(script, lang)] = feature_index

    # Build ScriptList.
    for script, lang_features in sorted(scripts.items()):
        srec = otTables.ScriptRecord()
        srec.ScriptTag = script
        srec.Script = otTables.Script()
        srec.Script.DefaultLangSys = None
        srec.Script.LangSysRecord = []
        for lang, feature_index_list in sorted(lang_features.items()):
            langrec = otTables.LangSysRecord()
            langrec.LangSys = otTables.LangSys()
            langrec.LangSys.LookupOrder = None
            req_feature_index = required_feature_indices.get((script, lang))
            if req_feature_index is None:
                langrec.LangSys.ReqFeatureIndex = 0xFFFF
            else:
                langrec.LangSys.ReqFeatureIndex = req_feature_index
            # The required feature is referenced through ReqFeatureIndex
            # and must not be repeated in the regular feature index list.
            langrec.LangSys.FeatureIndex = [
                i for i in feature_index_list if i != req_feature_index
            ]
            langrec.LangSys.FeatureCount = len(langrec.LangSys.FeatureIndex)
            if lang == "dflt":
                srec.Script.DefaultLangSys = langrec.LangSys
            else:
                langrec.LangSysTag = lang
                srec.Script.LangSysRecord.append(langrec)
        srec.Script.LangSysCount = len(srec.Script.LangSysRecord)
        table.ScriptList.ScriptRecord.append(srec)

    table.ScriptList.ScriptCount = len(table.ScriptList.ScriptRecord)
    table.FeatureList.FeatureCount = len(table.FeatureList.FeatureRecord)
    table.LookupList.LookupCount = len(table.LookupList.Lookup)
    return table
def get_lookup_name_(self, lookup):
    """Return the name under which ``lookup`` was defined, or None."""
    by_builder = {builder: name for name, builder in self.named_lookups_.items()}
    return by_builder.get(lookup)
def add_language_system(self, location, script, language):
    """Register a ``languagesystem`` statement.

    Enforces the ordering rules of OpenType Feature File spec 4.b.i:
    "DFLT dflt" must come first, all "DFLT" systems must precede
    non-"DFLT" ones, and duplicates are rejected.
    """
    system = (script, language)
    if system == ("DFLT", "dflt") and self.default_language_systems_:
        raise FeatureLibError(
            'If "languagesystem DFLT dflt" is present, it must be '
            "the first of the languagesystem statements",
            location,
        )
    if script != "DFLT":
        self.seen_non_DFLT_script_ = True
    elif self.seen_non_DFLT_script_:
        raise FeatureLibError(
            'languagesystems using the "DFLT" script tag must '
            "precede all other languagesystems",
            location,
        )
    if system in self.default_language_systems_:
        raise FeatureLibError(
            '"languagesystem %s %s" has already been specified'
            % (script.strip(), language.strip()),
            location,
        )
    self.default_language_systems_.add(system)
def get_default_language_systems_(self):
    """Return the registered language systems as a frozenset.

    Per OpenType Feature File spec 4.b.i, an absent ``languagesystem``
    statement behaves exactly like ``languagesystem DFLT dflt;``.
    """
    return frozenset(self.default_language_systems_ or {("DFLT", "dflt")})
def start_feature(self, location, name):
    """Enter a ``feature`` block, resetting all per-feature state."""
    self.language_systems = self.get_default_language_systems_()
    self.script_ = "DFLT"
    self.cur_lookup_ = None
    self.cur_feature_name_ = name
    self.lookupflag_ = 0
    self.lookupflag_markFilterSet_ = None
    if name == "aalt":
        # Remember where 'aalt' starts; it is resolved and built last.
        self.aalt_location_ = location
def end_feature(self):
    """Leave a ``feature`` block, clearing all per-feature state."""
    assert self.cur_feature_name_ is not None
    self.cur_feature_name_ = None
    self.language_systems = None
    self.cur_lookup_ = None
    self.lookupflag_ = 0
    self.lookupflag_markFilterSet_ = None
def start_lookup_block(self, location, name):
    """Enter a standalone or in-feature ``lookup`` block named ``name``."""
    if name in self.named_lookups_:
        raise FeatureLibError(
            'Lookup "%s" has already been defined' % name, location
        )
    if self.cur_feature_name_ == "aalt":
        raise FeatureLibError(
            "Lookup blocks cannot be placed inside 'aalt' features; "
            "move it out, and then refer to it with a lookup statement",
            location,
        )
    self.cur_lookup_name_ = name
    self.named_lookups_[name] = None
    self.cur_lookup_ = None
    if self.cur_feature_name_ is None:
        # Standalone lookup blocks start with a clean lookupflag.
        self.lookupflag_ = 0
        self.lookupflag_markFilterSet_ = None
def end_lookup_block(self):
    """Leave a ``lookup`` block, clearing the per-block state."""
    assert self.cur_lookup_name_ is not None
    self.cur_lookup_name_ = None
    self.cur_lookup_ = None
    if self.cur_feature_name_ is None:
        # Standalone blocks also reset the lookupflag on exit.
        self.lookupflag_ = 0
        self.lookupflag_markFilterSet_ = None
def add_lookup_call(self, lookup_name):
    """Apply a previously defined named lookup to the current feature."""
    assert lookup_name in self.named_lookups_, lookup_name
    self.cur_lookup_ = None
    lookup = self.named_lookups_[lookup_name]
    if lookup is not None:  # skip empty named lookup
        self.add_lookup_to_feature_(lookup, self.cur_feature_name_)
def set_font_revision(self, location, revision):
    """Record the ``FontRevision`` value destined for the head table."""
    self.fontRevision_ = revision
def set_language(self, location, language, include_default, required):
    """Handle a ``language`` statement inside a feature block.

    Starts a fresh rule list for (script, language, feature); when
    ``include_default`` is set (or language is "dflt"), the rules of the
    default language are copied in. ``required`` marks this feature as
    the language system's required feature.
    """
    assert len(language) == 4
    if self.cur_feature_name_ in ("aalt", "size"):
        raise FeatureLibError(
            "Language statements are not allowed "
            'within "feature %s"' % self.cur_feature_name_,
            location,
        )
    if self.cur_feature_name_ is None:
        raise FeatureLibError(
            "Language statements are not allowed "
            "within standalone lookup blocks",
            location,
        )
    self.cur_lookup_ = None

    feature_key = (self.script_, language, self.cur_feature_name_)
    default_lookups = self.features_.get((self.script_, "dflt", self.cur_feature_name_))
    if (language == "dflt" or include_default) and default_lookups:
        self.features_[feature_key] = default_lookups[:]
    else:
        self.features_[feature_key] = []
    self.language_systems = frozenset([(self.script_, language)])

    if required:
        lang_key = (self.script_, language)
        if lang_key in self.required_features_:
            raise FeatureLibError(
                "Language %s (script %s) has already "
                "specified feature %s as its required feature"
                % (
                    language.strip(),
                    self.script_.strip(),
                    self.required_features_[lang_key].strip(),
                ),
                location,
            )
        self.required_features_[lang_key] = self.cur_feature_name_
def getMarkAttachClass_(self, location, glyphs):
    """Return (allocating if needed) the MarkAttachmentType class for ``glyphs``.

    A glyph may belong to at most one mark attachment class; assigning
    a second class is an error.
    """
    glyphs = frozenset(glyphs)
    existing = self.markAttachClassID_.get(glyphs)
    if existing is not None:
        return existing
    # Class ids start at 1; 0 means "no class" in the lookup flag.
    new_id = len(self.markAttachClassID_) + 1
    self.markAttachClassID_[glyphs] = new_id
    for glyph in glyphs:
        if glyph in self.markAttach_:
            _, loc = self.markAttach_[glyph]
            raise FeatureLibError(
                "Glyph %s already has been assigned "
                "a MarkAttachmentType at %s" % (glyph, loc),
                location,
            )
        self.markAttach_[glyph] = (new_id, location)
    return new_id
def getMarkFilterSet_(self, location, glyphs):
    """Return (allocating if needed) the mark filtering set id for ``glyphs``."""
    glyphs = frozenset(glyphs)
    existing = self.markFilterSets_.get(glyphs)
    if existing is not None:
        return existing
    # Filter set indices are zero-based, assigned in definition order.
    new_id = len(self.markFilterSets_)
    self.markFilterSets_[glyphs] = new_id
    return new_id
def set_lookup_flag(self, location, value, markAttach, markFilter):
    """Set the current lookupflag, resolving mark classes and filter sets."""
    value = value & 0xFF
    if markAttach:
        # MarkAttachmentType lives in the high byte of the flag word.
        value |= self.getMarkAttachClass_(location, markAttach) << 8
    if markFilter:
        # UseMarkFilteringSet bit, plus a separately stored set index.
        value |= 0x10
        self.lookupflag_markFilterSet_ = self.getMarkFilterSet_(location, markFilter)
    else:
        self.lookupflag_markFilterSet_ = None
    self.lookupflag_ = value
def set_script(self, location, script):
    """Handle a ``script`` statement inside a feature block.

    Resets the lookupflag and re-selects the default language for the
    new script, as required by the feature file spec.
    """
    if self.cur_feature_name_ in ("aalt", "size"):
        raise FeatureLibError(
            "Script statements are not allowed "
            'within "feature %s"' % self.cur_feature_name_,
            location,
        )
    if self.cur_feature_name_ is None:
        raise FeatureLibError(
            "Script statements are not allowed within standalone lookup blocks",
            location,
        )
    if self.language_systems == {(script, "dflt")}:
        # Nothing to do.
        return
    self.cur_lookup_ = None
    self.script_ = script
    self.lookupflag_ = 0
    self.lookupflag_markFilterSet_ = None
    self.set_language(location, "dflt", include_default=True, required=False)
def find_lookup_builders_(self, lookups):
    """Map lists of lookup names to their LookupBuilder objects.

    Helper for building chain contextual substitutions. A None entry in
    ``lookups`` maps to a None result.
    """
    return [
        None
        if lookuplist is None
        else [self.named_lookups_.get(l.name) for l in lookuplist]
        for lookuplist in lookups
    ]
def add_attach_points(self, location, glyphs, contourPoints):
    """Record GDEF attachment points for each glyph in ``glyphs``."""
    points = set(contourPoints)
    for glyph in glyphs:
        self.attachPoints_.setdefault(glyph, set()).update(points)
def add_chain_context_pos(self, location, prefix, glyphs, suffix, lookups):
    """Add a chained contextual positioning rule."""
    builder = self.get_lookup_(location, ChainContextPosBuilder)
    rule = ChainContextualRule(
        prefix, glyphs, suffix, self.find_lookup_builders_(lookups)
    )
    builder.rules.append(rule)
def add_chain_context_subst(self, location, prefix, glyphs, suffix, lookups):
    """Add a chained contextual substitution rule."""
    builder = self.get_lookup_(location, ChainContextSubstBuilder)
    rule = ChainContextualRule(
        prefix, glyphs, suffix, self.find_lookup_builders_(lookups)
    )
    builder.rules.append(rule)
def add_alternate_subst(self, location, prefix, glyph, suffix, replacement):
    """Add an alternate substitution (``sub g from [...]``) rule."""
    if self.cur_feature_name_ == "aalt":
        # Inside 'aalt' just accumulate alternates; the actual lookup is
        # built later from the union of all collected alternates.
        self.aalt_alternates_.setdefault(glyph, set()).update(replacement)
        return
    if prefix or suffix:
        chain = self.get_lookup_(location, ChainContextSubstBuilder)
        lookup = self.get_chained_lookup_(location, AlternateSubstBuilder)
        chain.rules.append(ChainContextualRule(prefix, [{glyph}], suffix, [lookup]))
    else:
        lookup = self.get_lookup_(location, AlternateSubstBuilder)
    if glyph in lookup.alternates:
        raise FeatureLibError(
            'Already defined alternates for glyph "%s"' % glyph, location
        )
    lookup.alternates[glyph] = replacement
def add_feature_reference(self, location, featureName):
    """Record a ``feature XXXX;`` reference inside the 'aalt' feature."""
    if self.cur_feature_name_ != "aalt":
        raise FeatureLibError(
            'Feature references are only allowed inside "feature aalt"', location
        )
    self.aalt_features_.append((location, featureName))
def add_featureName(self, tag):
    """Mark feature ``tag`` as carrying a ``featureNames`` block."""
    self.featureNames_.add(tag)
def add_cv_parameter(self, tag):
    """Mark feature ``tag`` as carrying a ``cvParameters`` block."""
    self.cv_parameters_.add(tag)
def add_to_cv_num_named_params(self, tag):
    """Adds new items to ``self.cv_num_named_params_``
    or increments the count of existing items.
    """
    # Idiomatic counter update: one dict lookup instead of a
    # membership test plus a second lookup.
    self.cv_num_named_params_[tag] = self.cv_num_named_params_.get(tag, 0) + 1
def add_cv_character(self, character, tag):
    """Record a Character Variant example character for feature ``tag``."""
    self.cv_characters_[tag].append(character)
def set_base_axis(self, bases, scripts, vertical):
    """Record BASE table axis data (horizontal or vertical)."""
    attr = "base_vert_axis_" if vertical else "base_horiz_axis_"
    setattr(self, attr, (bases, scripts))
def set_size_parameters(
    self, location, DesignSize, SubfamilyID, RangeStart, RangeEnd
):
    """Record the ``parameters`` statement of the 'size' feature."""
    if self.cur_feature_name_ != "size":
        raise FeatureLibError(
            "Parameters statements are not allowed "
            'within "feature %s"' % self.cur_feature_name_,
            location,
        )
    self.size_parameters_ = [DesignSize, SubfamilyID, RangeStart, RangeEnd]
    # Register the 'size' feature for every language system even though
    # it may end up with no lookups at all.
    for script, lang in self.language_systems:
        self.features_.setdefault((script, lang, self.cur_feature_name_), [])
def add_ligature_subst(
    self, location, prefix, glyphs, suffix, replacement, forceChain
):
    """Add a ligature substitution; glyph classes are enumerated."""
    if prefix or suffix or forceChain:
        chain = self.get_lookup_(location, ChainContextSubstBuilder)
        lookup = self.get_chained_lookup_(location, LigatureSubstBuilder)
        chain.rules.append(ChainContextualRule(prefix, glyphs, suffix, [lookup]))
    else:
        lookup = self.get_lookup_(location, LigatureSubstBuilder)

    # OpenType feature file syntax, section 5.d, "Ligature substitution":
    # "Since the OpenType specification does not allow ligature
    # substitutions to be specified on target sequences that contain
    # glyph classes, the implementation software will enumerate
    # all specific glyph sequences if glyph classes are detected"
    for sequence in sorted(itertools.product(*glyphs)):
        lookup.ligatures[sequence] = replacement
def add_multiple_subst(
    self, location, prefix, glyph, suffix, replacements, forceChain=False
):
    """Add a one-to-many (multiple) substitution rule."""
    if prefix or suffix or forceChain:
        chain = self.get_lookup_(location, ChainContextSubstBuilder)
        sub = self.get_chained_lookup_(location, MultipleSubstBuilder)
        sub.mapping[glyph] = replacements
        chain.rules.append(ChainContextualRule(prefix, [{glyph}], suffix, [sub]))
        return
    lookup = self.get_lookup_(location, MultipleSubstBuilder)
    if glyph in lookup.mapping:
        if replacements == lookup.mapping[glyph]:
            # Identical duplicate rules are merely reported, not fatal.
            log.info(
                "Removing duplicate multiple substitution from glyph"
                ' "%s" to %s%s',
                glyph,
                replacements,
                f" at {location}" if location else "",
            )
        else:
            raise FeatureLibError(
                'Already defined substitution for glyph "%s"' % glyph, location
            )
    lookup.mapping[glyph] = replacements
def add_reverse_chain_single_subst(self, location, old_prefix, old_suffix, mapping):
    """Add a reverse chaining contextual single substitution rule."""
    builder = self.get_lookup_(location, ReverseChainSingleSubstBuilder)
    builder.rules.append((old_prefix, old_suffix, mapping))
def add_single_subst(self, location, prefix, suffix, mapping, forceChain):
    """Add single substitutions given as a ``{from_glyph: to_glyph}`` mapping."""
    if self.cur_feature_name_ == "aalt":
        # Inside 'aalt', single substitutions just feed the alternates pool.
        for from_glyph, to_glyph in mapping.items():
            self.aalt_alternates_.setdefault(from_glyph, set()).add(to_glyph)
        return
    if prefix or suffix or forceChain:
        self.add_single_subst_chained_(location, prefix, suffix, mapping)
        return
    lookup = self.get_lookup_(location, SingleSubstBuilder)
    for from_glyph, to_glyph in mapping.items():
        if from_glyph in lookup.mapping:
            if to_glyph == lookup.mapping[from_glyph]:
                # Identical duplicate rules are merely reported, not fatal.
                log.info(
                    "Removing duplicate single substitution from glyph"
                    ' "%s" to "%s" at %s',
                    from_glyph,
                    to_glyph,
                    location,
                )
            else:
                raise FeatureLibError(
                    'Already defined rule for replacing glyph "%s" by "%s"'
                    % (from_glyph, lookup.mapping[from_glyph]),
                    location,
                )
        lookup.mapping[from_glyph] = to_glyph
def add_single_subst_chained_(self, location, prefix, suffix, mapping):
    """Add a chained-context single substitution rule.

    Reuses a chainable SingleSubst sub-lookup where possible; see
    https://github.com/fonttools/fonttools/issues/512
    """
    chain = self.get_lookup_(location, ChainContextSubstBuilder)
    sub = chain.find_chainable_single_subst(set(mapping.keys()))
    if sub is None:
        sub = self.get_chained_lookup_(location, SingleSubstBuilder)
    sub.mapping.update(mapping)
    chain.rules.append(
        ChainContextualRule(prefix, [list(mapping.keys())], suffix, [sub])
    )
def add_cursive_pos(self, location, glyphclass, entryAnchor, exitAnchor):
    """Add a cursive attachment rule for ``glyphclass``."""
    lookup = self.get_lookup_(location, CursivePosBuilder)
    lookup.add_attachment(
        location,
        glyphclass,
        makeOpenTypeAnchor(entryAnchor),
        makeOpenTypeAnchor(exitAnchor),
    )
def add_marks_(self, location, lookupBuilder, marks):
    """Helper for add_mark_{base,liga,mark}_pos: register mark anchors.

    A mark glyph may belong to only one mark class per lookup; a
    conflicting class membership is an error.
    """
    for _, markClass in marks:
        for markClassDef in markClass.definitions:
            for mark in markClassDef.glyphs.glyphSet():
                if mark not in lookupBuilder.marks:
                    otMarkAnchor = makeOpenTypeAnchor(markClassDef.anchor)
                    lookupBuilder.marks[mark] = (markClass.name, otMarkAnchor)
                    continue
                existingMarkClass = lookupBuilder.marks[mark][0]
                if markClass.name != existingMarkClass:
                    raise FeatureLibError(
                        "Glyph %s cannot be in both @%s and @%s"
                        % (mark, existingMarkClass, markClass.name),
                        location,
                    )
def add_mark_base_pos(self, location, bases, marks):
    """Add a mark-to-base attachment rule."""
    builder = self.get_lookup_(location, MarkBasePosBuilder)
    self.add_marks_(location, builder, marks)
    for baseAnchor, markClass in marks:
        otBaseAnchor = makeOpenTypeAnchor(baseAnchor)
        for base in bases:
            builder.bases.setdefault(base, {})[markClass.name] = otBaseAnchor
def add_mark_lig_pos(self, location, ligatures, components):
    """Add a mark-to-ligature attachment rule.

    ``components`` holds, per ligature component, the list of
    (anchor, mark class) pairs.
    """
    builder = self.get_lookup_(location, MarkLigPosBuilder)
    componentAnchors = []
    for marks in components:
        self.add_marks_(location, builder, marks)
        anchors = {
            markClass.name: makeOpenTypeAnchor(ligAnchor)
            for ligAnchor, markClass in marks
        }
        componentAnchors.append(anchors)
    for glyph in ligatures:
        builder.ligatures[glyph] = componentAnchors
def add_mark_mark_pos(self, location, baseMarks, marks):
    """Add a mark-to-mark attachment rule."""
    builder = self.get_lookup_(location, MarkMarkPosBuilder)
    self.add_marks_(location, builder, marks)
    for baseAnchor, markClass in marks:
        otBaseAnchor = makeOpenTypeAnchor(baseAnchor)
        for baseMark in baseMarks:
            builder.baseMarks.setdefault(baseMark, {})[markClass.name] = otBaseAnchor
def add_class_pair_pos(self, location, glyphclass1, value1, glyphclass2, value2):
    """Add a class-based (format 2) pair positioning rule."""
    lookup = self.get_lookup_(location, PairPosBuilder)
    v1 = makeOpenTypeValueRecord(value1, pairPosContext=True)
    v2 = makeOpenTypeValueRecord(value2, pairPosContext=True)
    lookup.addClassPair(location, glyphclass1, v1, glyphclass2, v2)
def add_subtable_break(self, location):
    """Record an explicit ``subtable;`` break in the current lookup."""
    self.cur_lookup_.add_subtable_break(location)
def add_specific_pair_pos(self, location, glyph1, value1, glyph2, value2):
    """Add a glyph-specific (format 1) pair positioning rule."""
    lookup = self.get_lookup_(location, PairPosBuilder)
    v1 = makeOpenTypeValueRecord(value1, pairPosContext=True)
    v2 = makeOpenTypeValueRecord(value2, pairPosContext=True)
    lookup.addGlyphPair(location, glyph1, v1, glyph2, v2)
def add_single_pos(self, location, prefix, suffix, pos, forceChain):
    """Add single positioning rules; chained when context is present."""
    if prefix or suffix or forceChain:
        self.add_single_pos_chained_(location, prefix, suffix, pos)
        return
    lookup = self.get_lookup_(location, SinglePosBuilder)
    for glyphs, value in pos:
        otValueRecord = makeOpenTypeValueRecord(value, pairPosContext=False)
        for glyph in glyphs:
            try:
                lookup.add_pos(location, glyph, otValueRecord)
            except OpenTypeLibError as e:
                raise FeatureLibError(str(e), e.location) from e
def add_single_pos_chained_(self, location, prefix, suffix, pos):
    """Add a chained-context single positioning rule.

    Reuses chainable SinglePos sub-lookups where possible; see
    https://github.com/fonttools/fonttools/issues/514
    """
    chain = self.get_lookup_(location, ChainContextPosBuilder)
    targets = []
    for _, _, _, lookups in chain.rules:
        targets.extend(lookups)
    subs = []
    for glyphs, value in pos:
        if value is None:
            subs.append(None)
            continue
        otValue = makeOpenTypeValueRecord(value, pairPosContext=False)
        sub = chain.find_chainable_single_pos(targets, glyphs, otValue)
        if sub is None:
            sub = self.get_chained_lookup_(location, SinglePosBuilder)
            targets.append(sub)
        for glyph in glyphs:
            sub.add_pos(location, glyph, otValue)
        subs.append(sub)
    assert len(pos) == len(subs), (pos, subs)
    chain.rules.append(
        ChainContextualRule(prefix, [g for g, v in pos], suffix, subs)
    )
def setGlyphClass_(self, location, glyph, glyphClass):
    """Assign ``glyph`` to GDEF class ``glyphClass``; conflicts are errors."""
    previous = self.glyphClassDefs_.get(glyph)
    if previous is not None:
        oldClass, oldLocation = previous
        if oldClass and oldClass != glyphClass:
            raise FeatureLibError(
                "Glyph %s was assigned to a different class at %s"
                % (glyph, oldLocation),
                location,
            )
    self.glyphClassDefs_[glyph] = (glyphClass, location)
def add_glyphClassDef(
    self, location, baseGlyphs, ligatureGlyphs, markGlyphs, componentGlyphs
):
    """Process a GDEF ``GlyphClassDef`` statement (classes 1-4)."""
    groups = (
        (baseGlyphs, 1),       # base glyphs
        (ligatureGlyphs, 2),   # ligatures
        (markGlyphs, 3),       # marks
        (componentGlyphs, 4),  # components
    )
    for glyphs, glyphClass in groups:
        for glyph in glyphs:
            self.setGlyphClass_(location, glyph, glyphClass)
def add_ligatureCaretByIndex_(self, location, glyphs, carets):
    """Record GDEF ligature caret contour-point indices (first definition wins)."""
    for glyph in glyphs:
        self.ligCaretPoints_.setdefault(glyph, carets)
def add_ligatureCaretByPos_(self, location, glyphs, carets):
    """Record GDEF ligature caret coordinates (first definition wins)."""
    for glyph in glyphs:
        self.ligCaretCoords_.setdefault(glyph, carets)
def add_name_record(self, location, nameID, platformID, platEncID, langID, string):
    """Queue a ``name`` table record for later compilation."""
    self.names_.append([nameID, platformID, platEncID, langID, string])
def add_os2_field(self, key, value):
    """Record an ``OS/2`` table field override."""
    self.os2_[key] = value
def add_hhea_field(self, key, value):
    """Record an ``hhea`` table field override."""
    self.hhea_[key] = value
def add_vhea_field(self, key, value):
    """Record a ``vhea`` table field override."""
    self.vhea_[key] = value
def makeOpenTypeAnchor(anchor):
    """ast.Anchor --> otTables.Anchor (None passes through unchanged)."""
    if anchor is None:
        return None
    deviceX = deviceY = None
    if anchor.xDeviceTable is not None:
        deviceX = otl.buildDevice(dict(anchor.xDeviceTable))
    if anchor.yDeviceTable is not None:
        deviceY = otl.buildDevice(dict(anchor.yDeviceTable))
    return otl.buildAnchor(anchor.x, anchor.y, anchor.contourpoint, deviceX, deviceY)
# Map from feature-file value record attribute names (camelCase with a
# lowercase first letter, e.g. "xAdvance") to (otName, isDevice) pairs,
# derived from otBase.valueRecordFormat; "Reserved" slots are skipped.
_VALUEREC_ATTRS = {
name[0].lower() + name[1:]: (name, isDevice)
for _, name, isDevice, _ in otBase.valueRecordFormat
if not name.startswith("Reserved")
}
def makeOpenTypeValueRecord(v, pairPosContext):
    """ast.ValueRecord --> otBase.ValueRecord, or None when empty.

    In a pair-positioning context an all-zero record still needs an
    advance entry so that the resulting value format is non-zero.
    """
    if not v:
        return None
    vr = {}
    for astName, (otName, isDevice) in _VALUEREC_ATTRS.items():
        val = getattr(v, astName, None)
        if val:
            vr[otName] = otl.buildDevice(dict(val)) if isDevice else val
    if pairPosContext and not vr:
        vr = {"YAdvance": 0} if v.vertical else {"XAdvance": 0}
    return otl.buildValue(vr)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/feaLib/builder.py",
"copies": "4",
"size": "59498",
"license": "apache-2.0",
"hash": 5363078476778788000,
"line_mean": 39.1470985155,
"line_max": 88,
"alpha_frac": 0.5605566574,
"autogenerated": false,
"ratio": 4.089771789936761,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6650328447336762,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import tobytes, tostr
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from . import DefaultTable
# sstruct format of the 12-byte GMAP table header.
GMAPFormat = """
> # big endian
tableVersionMajor: H
tableVersionMinor: H
flags: H
recordsCount: H
recordsOffset: H
fontNameLength: H
"""
# psFontName is a byte string which follows the record above. This is zero
# padded to the beginning of the records array. The recordsOffset is 32 bit
# aligned.

# sstruct format of one fixed-size GMAP record (name is a 32-byte field).
GMAPRecordFormat1 = """
> # big endian
UV: L
cid: H
gid: H
ggid: H
name: 32s
"""
class GMAPRecord(object):
    """One GMAP record mapping a Unicode value to glyphlet data."""

    def __init__(self, uv=0, cid=0, gid=0, ggid=0, name=""):
        self.UV = uv      # Unicode value
        self.cid = cid    # CID in the host font
        self.gid = gid    # glyph id in the host font
        self.ggid = ggid  # glyph id within the glyphlet
        self.name = name  # glyphlet name (packed into a 32-byte field)

    def toXML(self, writer, ttFont):
        writer.begintag("GMAPRecord")
        writer.newline()
        writer.simpletag("UV", value=self.UV)
        writer.newline()
        writer.simpletag("cid", value=self.cid)
        writer.newline()
        writer.simpletag("gid", value=self.gid)
        writer.newline()
        # Bug fix: this used to write self.gid, silently dropping the
        # glyphlet glyph id from the XML dump.
        writer.simpletag("glyphletGid", value=self.ggid)
        writer.newline()
        writer.simpletag("GlyphletName", value=self.name)
        writer.newline()
        writer.endtag("GMAPRecord")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        value = attrs["value"]
        if name == "GlyphletName":
            self.name = value
        elif name == "glyphletGid":
            # Bug fix: map the XML element name back onto the .ggid
            # attribute so toXML/fromXML round-trip; setattr would have
            # created a stray "glyphletGid" attribute instead.
            self.ggid = safeEval(value)
        else:
            setattr(self, name, safeEval(value))

    def compile(self, ttFont):
        """Pack this record into its fixed-size binary form."""
        if self.UV is None:
            self.UV = 0
        nameLen = len(self.name)
        if nameLen < 32:
            # Zero-pad the name to the fixed 32-byte field width.
            self.name = self.name + "\0" * (32 - nameLen)
        data = sstruct.pack(GMAPRecordFormat1, self)
        return data

    def __repr__(self):
        return (
            "GMAPRecord[ UV: " + str(self.UV)
            + ", cid: " + str(self.cid)
            + ", gid: " + str(self.gid)
            + ", ggid: " + str(self.ggid)
            + ", Glyphlet Name: " + str(self.name) + " ]"
        )
class table_G_M_A_P_(DefaultTable.DefaultTable):
    """The GMAP table: maps Unicode values to glyphlet records."""

    dependencies = []

    def decompile(self, data, ttFont):
        dummy, newData = sstruct.unpack2(GMAPFormat, data, self)
        self.psFontName = tostr(newData[: self.fontNameLength])
        assert (
            self.recordsOffset % 4
        ) == 0, "GMAP error: recordsOffset is not 32 bit aligned."
        newData = data[self.recordsOffset :]
        self.gmapRecords = []
        for i in range(self.recordsCount):
            gmapRecord, newData = sstruct.unpack2(
                GMAPRecordFormat1, newData, GMAPRecord()
            )
            # Drop the zero padding of the fixed 32-byte name field.
            gmapRecord.name = gmapRecord.name.strip('\0')
            self.gmapRecords.append(gmapRecord)

    def compile(self, ttFont):
        self.recordsCount = len(self.gmapRecords)
        self.fontNameLength = len(self.psFontName)
        # Records start on the next 32-bit boundary after the 12-byte
        # header plus the PostScript font name.
        self.recordsOffset = 4 * (((self.fontNameLength + 12) + 3) // 4)
        data = sstruct.pack(GMAPFormat, self)
        data = data + tobytes(self.psFontName)
        data = data + b"\0" * (self.recordsOffset - len(data))
        for record in self.gmapRecords:
            data = data + record.compile(ttFont)
        return data

    def toXML(self, writer, ttFont):
        writer.comment("Most of this table will be recalculated by the compiler")
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(GMAPFormat)
        for name in names:
            writer.simpletag(name, value=getattr(self, name))
            writer.newline()
        writer.simpletag("PSFontName", value=self.psFontName)
        writer.newline()
        for gmapRecord in self.gmapRecords:
            gmapRecord.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        if name == "GMAPRecord":
            if not hasattr(self, "gmapRecords"):
                self.gmapRecords = []
            gmapRecord = GMAPRecord()
            self.gmapRecords.append(gmapRecord)
            for element in content:
                if isinstance(element, str):
                    continue
                childName, childAttrs, childContent = element
                gmapRecord.fromXML(childName, childAttrs, childContent, ttFont)
        else:
            value = attrs["value"]
            if name == "PSFontName":
                self.psFontName = value
            else:
                setattr(self, name, safeEval(value))
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/G_M_A_P_.py",
"copies": "5",
"size": "3755",
"license": "apache-2.0",
"hash": 1342476547796580900,
"line_mean": 28.5669291339,
"line_max": 174,
"alpha_frac": 0.6886817577,
"autogenerated": false,
"ratio": 2.8576864535768647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6046368211276865,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.py23 import tostr
from fontTools.pens.transformPen import TransformPen
from fontTools.misc import etree
from .parser import parse_path
from .shapes import PathBuilder
# Public API of this module; tostr keeps the names native strings.
__all__ = [tostr(s) for s in ("SVGPath", "parse_path")]
class SVGPath(object):
    """Parse SVG ``path`` elements from a file or string, and draw them
    onto a glyph object that supports the FontTools Pen protocol.

    For example, reading from an SVG file and drawing to a Defcon Glyph:

        import defcon
        glyph = defcon.Glyph()
        pen = glyph.getPen()
        svg = SVGPath("path/to/a.svg")
        svg.draw(pen)

    Or reading from a string containing SVG data, using the alternative
    'fromstring' (a class method):

        data = '<?xml version="1.0" ...'
        svg = SVGPath.fromstring(data)
        svg.draw(pen)

    Both constructors can optionally take a 'transform' matrix (6-float
    tuple, or a FontTools Transform object) to modify the draw output.
    """

    def __init__(self, filename=None, transform=None):
        if filename is None:
            self.root = etree.ElementTree()
        else:
            self.root = etree.parse(filename).getroot()
        self.transform = transform

    @classmethod
    def fromstring(cls, data, transform=None):
        """Alternate constructor: parse SVG data from a string."""
        self = cls(transform=transform)
        self.root = etree.fromstring(data)
        return self

    def draw(self, pen):
        """Draw every path found in the document onto ``pen``."""
        if self.transform:
            pen = TransformPen(pen, self.transform)
        pb = PathBuilder()
        # xpath | doesn't seem to work reliably, so just walk the tree.
        for el in self.root.iter():
            pb.add_path_from_element(el)
        base_pen = pen
        for path, transform in zip(pb.paths, pb.transforms):
            # Per-element transforms wrap the (possibly already
            # transformed) base pen.
            target = TransformPen(base_pen, transform) if transform else base_pen
            parse_path(path, target)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/svgLib/path/__init__.py",
"copies": "5",
"size": "1951",
"license": "apache-2.0",
"hash": -7407992158775994000,
"line_mean": 30.4677419355,
"line_max": 72,
"alpha_frac": 0.6186570989,
"autogenerated": false,
"ratio": 4.081589958158996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 62
} |
from fontTools.misc.py23 import unichr
import vanilla
from AppKit import NSSegmentStyleSmallSquare
from defconAppKit.windows.baseWindow import BaseWindowController
from mojo.roboFont import OpenWindow
from mojo.UI import UpdateCurrentGlyphView
from ramsayStData import RamsayStData
class AddGlyphNameSheet(object):
    """A small sheet asking the user for a glyph name.

    Invokes ``callback(sheet)`` when the Add button is pressed; the
    entered name is available through ``get()``.
    """

    def __init__(self, parentWindow, callback=None):
        self.callback = callback
        self.w = vanilla.Sheet((350, 90), parentWindow=parentWindow)
        self.w.glyphNameText = vanilla.TextBox((10, 17, 100, 22), "Glyph Name:")
        self.w.glyphName = vanilla.EditText((100, 17, -10, 22))
        self.w.addButton = vanilla.Button(
            (-70, -30, -10, 20), "Add", callback=self.addCallback, sizeStyle="small"
        )
        self.w.setDefaultButton(self.w.addButton)
        self.w.closeButton = vanilla.Button(
            (-150, -30, -80, 20),
            "Cancel",
            callback=self.closeCallback,
            sizeStyle="small",
        )
        # Dismiss on cmd-. and on the escape key.
        self.w.closeButton.bind(".", ["command"])
        self.w.closeButton.bind(unichr(27), [])
        self.w.open()

    def addCallback(self, sender):
        if self.callback:
            self.callback(self)
        self.closeCallback(sender)

    def closeCallback(self, sender):
        self.w.close()

    def get(self):
        """Return the glyph name typed by the user."""
        return self.w.glyphName.get()
class RamsayStSettingsWindowController(BaseWindowController):
    """Floating settings window for the Ramsay St. glyph-neighbor preview.

    Edits RamsayStData (colors, preview flag, and the per-glyph
    left/right neighbor table) and refreshes the glyph view on change.
    """

    def __init__(self):
        self.w = vanilla.FloatingWindow(
            (310, 300), "Ramsay St. Settings", minSize=(310, 250), maxSize=(310, 700)
        )
        self.w.showPreview = vanilla.CheckBox(
            (10, 10, -10, 22),
            "Show Preview",
            value=RamsayStData.showPreview,
            callback=self.showPreviewCallback,
        )
        self.w.fillColorText = vanilla.TextBox((10, 40, 110, 22), "Fill Color:")
        self.w.fillColor = vanilla.ColorWell(
            (10, 60, 110, 40),
            color=RamsayStData.fillColor,
            callback=self.fillColorCallback,
        )
        self.w.strokeColorText = vanilla.TextBox((130, 40, -10, 22), "Stroke Color:")
        self.w.strokeColor = vanilla.ColorWell(
            (130, 60, -10, 40),
            color=RamsayStData.strokeColor,
            callback=self.strokeColorCallback,
        )
        items = RamsayStData.getItems()
        columnDescriptions = [
            dict(title="Glyph Name", key="glyphName"),
            dict(title="Left", key="left"),
            dict(title="Right", key="right"),
        ]
        self.w.dataList = vanilla.List(
            (10, 110, -10, -40),
            items,
            columnDescriptions=columnDescriptions,
            editCallback=self.dataListEditCallback,
        )
        segmentDescriptions = [
            dict(title="+"),
            dict(title="-"),
            dict(title="import"),
            dict(title="export"),
        ]
        self.w.addDel = vanilla.SegmentedButton(
            (12, -32, -140, 20),
            segmentDescriptions,
            selectionStyle="momentary",
            callback=self.addDelCallback,
        )
        self.w.addDel.getNSSegmentedButton().setSegmentStyle_(NSSegmentStyleSmallSquare)
        self.w.okButton = vanilla.Button(
            (-70, -30, -15, 20), "Apply", callback=self.okCallback, sizeStyle="small"
        )
        self.w.setDefaultButton(self.w.okButton)
        self.w.closeButton = vanilla.Button(
            (-140, -30, -80, 20),
            "Cancel",
            callback=self.closeCallback,
            sizeStyle="small",
        )
        # Dismiss on cmd-. and on the escape key.
        self.w.closeButton.bind(".", ["command"])
        self.w.closeButton.bind(unichr(27), [])
        self.w.open()

    def showPreviewCallback(self, sender):
        RamsayStData.showPreview = sender.get()
        RamsayStData.save()
        self.updateView()

    def fillColorCallback(self, sender):
        RamsayStData.fillColor = sender.get()
        RamsayStData.save()
        self.updateView()

    def strokeColorCallback(self, sender):
        RamsayStData.strokeColor = sender.get()
        RamsayStData.save()
        self.updateView()

    def _addGlyphName(self, sender):
        glyphName = sender.get()
        if glyphName in RamsayStData.keys():
            # Already present: select the existing row instead of
            # adding a duplicate.
            index = 0
            for item in self.w.dataList:
                if glyphName == item.glyphName():
                    break
                index += 1
            self.w.dataList.setSelection([index])
            return
        self.w.dataList.append(RamsayStData.newItem(glyphName))

    def addGlyphName(self):
        AddGlyphNameSheet(self.w, self._addGlyphName)

    def delGlyphName(self):
        sel = self.w.dataList.getSelection()
        if sel:
            index = sel[0]
            del self.w.dataList[index]

    def importGlyphNames(self):
        self.showGetFile(["ramsaySt"], self._importGlyphNames)

    def _importGlyphNames(self, path):
        """Parse a .ramsaySt file (lines of '<left> <glyph> <right>')."""
        if not path:
            return
        # Fix: use a context manager so the file handle is closed even
        # if reading fails (the original left it open on error).
        with open(path[0], "r") as f:
            lines = f.readlines()
        data = dict()
        for line in lines:
            if line.startswith("#"):
                continue
            items = line.split()
            if len(items) != 3:
                continue
            leftGlyphName, glyphName, rightGlyphName = items
            data[glyphName] = leftGlyphName, rightGlyphName
        RamsayStData.clear()
        RamsayStData.update(data)
        self.w.dataList.set(RamsayStData.getItems())
        self.updateView()

    def exportGlyphNames(self):
        self.showPutFile(["ramsaySt"], self._exportGlyphNames)

    def _exportGlyphNames(self, path):
        """Write the neighbor table as a .ramsaySt text file."""
        if path is None:
            return
        output = [
            "# Ramsay St. Glyph List",
            "# <glyphName> <leftGlyphName> <rightGlyphGlyphName>",
        ]
        for glyphName in sorted(RamsayStData.keys()):
            value = RamsayStData.get(glyphName, None)
            if value is not None:
                output.append("%s %s %s" % (glyphName, value[0], value[1]))
        # Fix: use a context manager so the file is flushed and closed
        # even if writing fails.
        with open(path, "w") as f:
            f.write("\n".join(output))

    def addDelCallback(self, sender):
        v = sender.get()
        if v == 0:
            # add
            self.addGlyphName()
        elif v == 1:
            # remove
            self.delGlyphName()
        elif v == 2:
            # import
            self.importGlyphNames()
        elif v == 3:
            # export
            self.exportGlyphNames()

    def okCallback(self, sender):
        RamsayStData.setItems(self.w.dataList)
        RamsayStData.save()
        self.updateView()

    def closeCallback(self, sender):
        self.w.close()

    def dataListEditCallback(self, sender):
        for i in sender.getSelection():
            RamsayStData.set(sender[i])

    def updateView(self):
        UpdateCurrentGlyphView()
# Open (or bring forward) the settings window when this script is run.
OpenWindow(RamsayStSettingsWindowController)
| {
"repo_name": "typemytype/ramsayStreetRoboFontExtension",
"path": "RamsaySt.roboFontExt/lib/ramsayStSettings.py",
"copies": "1",
"size": "6606",
"license": "mit",
"hash": -7742487052892665000,
"line_mean": 32.7040816327,
"line_max": 147,
"alpha_frac": 0.5976385104,
"autogenerated": false,
"ratio": 3.7512776831345827,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48489161935345826,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.roundTools import noRound, otRound
from fontTools.ttLib.tables import otTables as ot
from fontTools.varLib.models import supportScalar
from fontTools.varLib.builder import (buildVarRegionList, buildVarStore,
buildVarRegion, buildVarData)
from functools import partial
from collections import defaultdict
from array import array
def _getLocationKey(loc):
return tuple(sorted(loc.items(), key=lambda kv: kv[0]))
class OnlineVarStoreBuilder(object):
    """Builds an ItemVariationStore incrementally as delta rows are stored.

    Callers first select the active variation model/supports (setModel or
    setSupports), then feed rows via storeMasters/storeDeltas, and finally
    call finish() to get the completed store.
    """

    def __init__(self, axisTags):
        self._axisTags = axisTags
        self._regionMap = {}        # location key -> index into _regionList
        self._regionList = buildVarRegionList([], axisTags)
        self._store = buildVarStore(self._regionList, [])
        self._data = None           # VarData currently being appended to
        self._model = None
        self._supports = None       # supports of the active model, base dropped
        self._varDataIndices = {}   # region-index tuple -> index into _store.VarData
        self._varDataCaches = {}    # region-index tuple -> {deltas tuple: varIdx}
        self._cache = {}            # dedup cache for the active VarData

    def setModel(self, model):
        # Adopt the model's supports first, then remember the model itself
        # (setSupports clears _model).
        self.setSupports(model.supports)
        self._model = model

    def setSupports(self, supports):
        self._model = None
        self._supports = list(supports)
        if not self._supports[0]:
            del self._supports[0]  # Drop base master support
        self._cache = {}
        self._data = None

    def finish(self, optimize=True):
        """Finalize counts on the store and return it."""
        self._regionList.RegionCount = len(self._regionList.Region)
        self._store.VarDataCount = len(self._store.VarData)
        for data in self._store.VarData:
            data.ItemCount = len(data.Item)
            data.calculateNumShorts(optimize=optimize)
        return self._store

    def _add_VarData(self):
        # Map the active supports to region indices, creating regions on
        # first sight.
        regionMap = self._regionMap
        regionList = self._regionList

        regions = self._supports
        regionIndices = []
        for region in regions:
            key = _getLocationKey(region)
            idx = regionMap.get(key)
            if idx is None:
                varRegion = buildVarRegion(region, self._axisTags)
                idx = regionMap[key] = len(regionList.Region)
                regionList.Region.append(varRegion)
            regionIndices.append(idx)

        # Check if we have one already...
        key = tuple(regionIndices)
        varDataIdx = self._varDataIndices.get(key)
        if varDataIdx is not None:
            self._outer = varDataIdx
            self._data = self._store.VarData[varDataIdx]
            self._cache = self._varDataCaches[key]
            if len(self._data.Item) == 0xFFFF:
                # This is full. Need new one.
                varDataIdx = None

        if varDataIdx is None:
            self._data = buildVarData(regionIndices, [], optimize=False)
            self._outer = len(self._store.VarData)
            self._store.VarData.append(self._data)
            self._varDataIndices[key] = self._outer
            if key not in self._varDataCaches:
                self._varDataCaches[key] = {}
            self._cache = self._varDataCaches[key]

    def storeMasters(self, master_values):
        """Convert per-master values to deltas and store them.

        Returns (base value, varIdx) for the stored row.
        """
        deltas = self._model.getDeltas(master_values, round=round)
        base = deltas.pop(0)
        # Deltas were already rounded by getDeltas; avoid double rounding.
        return base, self.storeDeltas(deltas, round=noRound)

    def storeDeltas(self, deltas, *, round=round):
        deltas = [round(d) for d in deltas]
        if len(deltas) == len(self._supports) + 1:
            # A leading base-master delta is present; drop it.
            deltas = tuple(deltas[1:])
        else:
            assert len(deltas) == len(self._supports)
            deltas = tuple(deltas)

        varIdx = self._cache.get(deltas)
        if varIdx is not None:
            # Identical row stored before; reuse its index.
            return varIdx

        if not self._data:
            self._add_VarData()
        inner = len(self._data.Item)
        if inner == 0xFFFF:
            # Full array. Start new one.
            self._add_VarData()
            return self.storeDeltas(deltas)
        self._data.addItem(deltas, round=noRound)

        varIdx = (self._outer << 16) + inner
        self._cache[deltas] = varIdx
        return varIdx
def VarData_addItem(self, deltas, *, round=round):
    """Append one row of (rounded) deltas to this VarData's Item list.

    Accepts either exactly VarRegionCount deltas, or VarRegionCount + 1
    where the extra leading value (the base-master delta) is dropped.
    """
    rounded = [round(v) for v in deltas]
    expected = self.VarRegionCount
    actual = len(rounded)
    if actual == expected + 1:
        # Leading base-master delta present; strip it.
        rounded = rounded[1:]
    else:
        assert actual == expected, (expected, actual)
    self.Item.append(list(rounded))
    self.ItemCount = len(self.Item)
# Install as a method on the compiled otTables.VarData type.
ot.VarData.addItem = VarData_addItem
def VarRegion_get_support(self, fvar_axes):
    """Map this VarRegion to a varLib-style support dict.

    Returns {axisTag: (start, peak, end)} for every axis whose peak is
    nonzero; zero-peak axes do not constrain the region and are omitted.
    """
    support = {}
    for axisIndex, axisRegion in enumerate(self.VarRegionAxis):
        if axisRegion.PeakCoord == 0:
            continue
        tag = fvar_axes[axisIndex].axisTag
        support[tag] = (axisRegion.StartCoord, axisRegion.PeakCoord, axisRegion.EndCoord)
    return support
# Install as a method on the compiled otTables.VarRegion type.
ot.VarRegion.get_support = VarRegion_get_support
class VarStoreInstancer(object):
    """Resolve VarIdx delta values for one fixed location in design space."""

    def __init__(self, varstore, fvar_axes, location={}):
        self.fvar_axes = fvar_axes
        assert varstore is None or varstore.Format == 1
        if varstore:
            self._varData = varstore.VarData
            self._regions = varstore.VarRegionList.Region
        else:
            self._varData = []
            self._regions = []
        self.setLocation(location)

    def setLocation(self, location):
        """Set the instance location and drop memoized scalars."""
        self.location = dict(location)
        self._clearCaches()

    def _clearCaches(self):
        # Per-region scalars, memoized by region index.
        self._scalars = {}

    def _getScalar(self, regionIdx):
        # Scalars depend only on the location, so cache per region index.
        cached = self._scalars.get(regionIdx)
        if cached is None:
            support = self._regions[regionIdx].get_support(self.fvar_axes)
            cached = supportScalar(self.location, support)
            self._scalars[regionIdx] = cached
        return cached

    @staticmethod
    def interpolateFromDeltasAndScalars(deltas, scalars):
        """Weighted sum of deltas by scalars, skipping zero scalars."""
        total = 0.
        for delta, scalar in zip(deltas, scalars):
            if scalar:
                total += delta * scalar
        return total

    def __getitem__(self, varidx):
        major = varidx >> 16
        minor = varidx & 0xFFFF
        data = self._varData[major]
        scalars = [self._getScalar(idx) for idx in data.VarRegionIndex]
        return self.interpolateFromDeltasAndScalars(data.Item[minor], scalars)

    def interpolateFromDeltas(self, varDataIndex, deltas):
        data = self._varData[varDataIndex]
        scalars = [self._getScalar(idx) for idx in data.VarRegionIndex]
        return self.interpolateFromDeltasAndScalars(deltas, scalars)
#
# Optimizations
#
# retainFirstMap - If true, major 0 mappings are retained. Deltas for unused indices are zeroed
# advIdxes - Set of major 0 indices for advance deltas to be listed first. Other major 0 indices follow.

def VarStore_subset_varidxes(self, varIdxes, optimize=True, retainFirstMap=False, advIdxes=set()):
    """Drop VarData rows not referenced by *varIdxes*, in place.

    Returns a mapping from old VarIdx to new VarIdx (identity-by-minor for
    major 0 when retainFirstMap is set).
    """
    # Sort out used varIdxes by major/minor.
    used = {}
    for varIdx in varIdxes:
        major = varIdx >> 16
        minor = varIdx & 0xFFFF
        d = used.get(major)
        if d is None:
            d = used[major] = set()
        d.add(minor)
    del varIdxes

    #
    # Subset VarData
    #
    varData = self.VarData
    newVarData = []
    varDataMap = {}
    for major, data in enumerate(varData):
        usedMinors = used.get(major)
        if usedMinors is None:
            # No row of this VarData is referenced; drop it entirely.
            continue
        newMajor = len(newVarData)
        newVarData.append(data)
        items = data.Item
        newItems = []
        if major == 0 and retainFirstMap:
            # Keep every row in place (identity mapping); zero out the
            # unused ones instead of removing them.
            for minor in range(len(items)):
                newItems.append(items[minor] if minor in usedMinors else [0] * len(items[minor]))
                varDataMap[minor] = minor
        else:
            if major == 0:
                # Advance deltas first, then the rest of major 0.
                minors = sorted(advIdxes) + sorted(usedMinors - advIdxes)
            else:
                minors = sorted(usedMinors)
            for minor in minors:
                newMinor = len(newItems)
                newItems.append(items[minor])
                varDataMap[(major << 16) + minor] = (newMajor << 16) + newMinor
        data.Item = newItems
        data.ItemCount = len(data.Item)

        data.calculateNumShorts(optimize=optimize)

    self.VarData = newVarData
    self.VarDataCount = len(self.VarData)

    self.prune_regions()

    return varDataMap

ot.VarStore.subset_varidxes = VarStore_subset_varidxes
def VarStore_prune_regions(self):
    """Remove unused VarRegions."""
    #
    # Subset VarRegionList
    #

    # Collect: every region index still referenced by some VarData.
    usedRegions = set()
    for data in self.VarData:
        usedRegions.update(data.VarRegionIndex)
    # Subset: keep used regions only, in their original relative order.
    regionList = self.VarRegionList
    regions = regionList.Region
    newRegions = []
    regionMap = {}
    for i in sorted(usedRegions):
        regionMap[i] = len(newRegions)
        newRegions.append(regions[i])
    regionList.Region = newRegions
    regionList.RegionCount = len(regionList.Region)
    # Map: rewrite each VarData's region indices into the new numbering.
    for data in self.VarData:
        data.VarRegionIndex = [regionMap[i] for i in data.VarRegionIndex]

ot.VarStore.prune_regions = VarStore_prune_regions
def _visit(self, func):
    """Recurse down from self, if type of an object is ot.Device,
    call func() on it. Works on otData-style classes."""
    # Exact type check (not isinstance): only plain Device tables qualify.
    if type(self) == ot.Device:
        func(self)

    elif isinstance(self, list):
        for that in self:
            _visit(that, func)

    elif hasattr(self, 'getConverters') and not hasattr(self, 'postRead'):
        # otData-generated table: walk every converter-declared attribute.
        for conv in self.getConverters():
            that = getattr(self, conv.name, None)
            if that is not None:
                _visit(that, func)

    elif isinstance(self, ot.ValueRecord):
        # ValueRecords hold Device tables in plain instance attributes.
        for that in self.__dict__.values():
            _visit(that, func)
def _Device_recordVarIdx(self, s):
"""Add VarIdx in this Device table (if any) to the set s."""
if self.DeltaFormat == 0x8000:
s.add((self.StartSize<<16)+self.EndSize)
def Object_collect_device_varidxes(self, varidxes):
    """Collect VarIdxes from all Device tables under self into *varidxes* (a set)."""
    adder = partial(_Device_recordVarIdx, s=varidxes)
    _visit(self, adder)

# Patch onto the table roots that can contain variation Device tables.
ot.GDEF.collect_device_varidxes = Object_collect_device_varidxes
ot.GPOS.collect_device_varidxes = Object_collect_device_varidxes
def _Device_mapVarIdx(self, mapping, done):
"""Map VarIdx in this Device table (if any) through mapping."""
if id(self) in done:
return
done.add(id(self))
if self.DeltaFormat == 0x8000:
varIdx = mapping[(self.StartSize<<16)+self.EndSize]
self.StartSize = varIdx >> 16
self.EndSize = varIdx & 0xFFFF
def Object_remap_device_varidxes(self, varidxes_map):
    """Rewrite every Device table's VarIdx under self through *varidxes_map*."""
    # A single shared `done` set ensures Device tables reachable through
    # multiple paths are remapped only once.
    mapper = partial(_Device_mapVarIdx, mapping=varidxes_map, done=set())
    _visit(self, mapper)

# Patch onto the table roots that can contain variation Device tables.
ot.GDEF.remap_device_varidxes = Object_remap_device_varidxes
ot.GPOS.remap_device_varidxes = Object_remap_device_varidxes
class _Encoding(object):
def __init__(self, chars):
self.chars = chars
self.width = self._popcount(chars)
self.overhead = self._characteristic_overhead(chars)
self.items = set()
def append(self, row):
self.items.add(row)
def extend(self, lst):
self.items.update(lst)
def get_room(self):
"""Maximum number of bytes that can be added to characteristic
while still being beneficial to merge it into another one."""
count = len(self.items)
return max(0, (self.overhead - 1) // count - self.width)
room = property(get_room)
@property
def gain(self):
"""Maximum possible byte gain from merging this into another
characteristic."""
count = len(self.items)
return max(0, self.overhead - count * (self.width + 1))
def sort_key(self):
return self.width, self.chars
def __len__(self):
return len(self.items)
def can_encode(self, chars):
return not (chars & ~self.chars)
def __sub__(self, other):
return self._popcount(self.chars & ~other.chars)
@staticmethod
def _popcount(n):
# Apparently this is the fastest native way to do it...
# https://stackoverflow.com/a/9831671
return bin(n).count('1')
@staticmethod
def _characteristic_overhead(chars):
"""Returns overhead in bytes of encoding this characteristic
as a VarData."""
c = 6
while chars:
if chars & 3:
c += 2
chars >>= 2
return c
def _find_yourself_best_new_encoding(self, done_by_width):
self.best_new_encoding = None
for new_width in range(self.width+1, self.width+self.room+1):
for new_encoding in done_by_width[new_width]:
if new_encoding.can_encode(self.chars):
break
else:
new_encoding = None
self.best_new_encoding = new_encoding
class _EncodingDict(dict):
def __missing__(self, chars):
r = self[chars] = _Encoding(chars)
return r
def add_row(self, row):
chars = self._row_characteristics(row)
self[chars].append(row)
@staticmethod
def _row_characteristics(row):
"""Returns encoding characteristics for a row."""
chars = 0
i = 1
for v in row:
if v:
chars += i
if not (-128 <= v <= 127):
chars += i * 2
i <<= 2
return chars
def VarStore_optimize(self):
    """Optimize storage. Returns mapping from old VarIdxes to new ones."""

    # TODO
    # Check that no two VarRegions are the same; if they are, fold them.

    n = len(self.VarRegionList.Region)  # Number of columns
    zeroes = array('h', [0] * n)

    front_mapping = {}  # Map from old VarIdxes to full row tuples

    encodings = _EncodingDict()

    # Collect all items into a set of full rows (with lots of zeroes.)
    for major, data in enumerate(self.VarData):
        regionIndices = data.VarRegionIndex
        for minor, item in enumerate(data.Item):
            # Expand the row to all n columns; unused columns stay zero.
            row = array('h', zeroes)
            for regionIdx, v in zip(regionIndices, item):
                row[regionIdx] += v
            row = tuple(row)
            encodings.add_row(row)
            front_mapping[(major << 16) + minor] = row

    # Separate encodings that have no gain (are decided) and those having
    # possible gain (possibly to be merged into others.)
    encodings = sorted(encodings.values(), key=_Encoding.__len__, reverse=True)
    done_by_width = defaultdict(list)
    todo = []
    for encoding in encodings:
        if not encoding.gain:
            done_by_width[encoding.width].append(encoding)
        else:
            todo.append(encoding)

    # For each encoding that is possibly to be merged, find the best match
    # in the decided encodings, and record that.
    todo.sort(key=_Encoding.get_room)
    for encoding in todo:
        encoding._find_yourself_best_new_encoding(done_by_width)

    # Walk through todo encodings, for each, see if merging it with
    # another todo encoding gains more than each of them merging with
    # their best decided encoding. If yes, merge them and add resulting
    # encoding back to todo queue. If not, move the encoding to decided
    # list. Repeat till done.
    while todo:
        encoding = todo.pop()

        best_idx = None
        best_gain = 0
        for i, other_encoding in enumerate(todo):
            combined_chars = other_encoding.chars | encoding.chars
            combined_width = _Encoding._popcount(combined_chars)
            combined_overhead = _Encoding._characteristic_overhead(combined_chars)
            # Bytes saved by merging the two, versus keeping them apart
            # and merging each with its best decided encoding.
            combined_gain = (
                + encoding.overhead
                + other_encoding.overhead
                - combined_overhead
                - (combined_width - encoding.width) * len(encoding)
                - (combined_width - other_encoding.width) * len(other_encoding)
            )
            this_gain = 0 if encoding.best_new_encoding is None else (
                + encoding.overhead
                - (encoding.best_new_encoding.width - encoding.width) * len(encoding)
            )
            other_gain = 0 if other_encoding.best_new_encoding is None else (
                + other_encoding.overhead
                - (other_encoding.best_new_encoding.width - other_encoding.width) * len(other_encoding)
            )
            separate_gain = this_gain + other_gain

            if combined_gain > separate_gain:
                best_idx = i
                best_gain = combined_gain - separate_gain

        if best_idx is None:
            # Encoding is decided as is
            done_by_width[encoding.width].append(encoding)
        else:
            other_encoding = todo[best_idx]
            combined_chars = other_encoding.chars | encoding.chars
            combined_encoding = _Encoding(combined_chars)
            combined_encoding.extend(encoding.items)
            combined_encoding.extend(other_encoding.items)
            combined_encoding._find_yourself_best_new_encoding(done_by_width)
            del todo[best_idx]
            todo.append(combined_encoding)

    # Assemble final store.
    back_mapping = {}  # Mapping from full rows to new VarIdxes
    encodings = sum(done_by_width.values(), [])
    encodings.sort(key=_Encoding.sort_key)
    self.VarData = []
    for major, encoding in enumerate(encodings):
        data = ot.VarData()
        self.VarData.append(data)
        data.VarRegionIndex = range(n)
        data.VarRegionCount = len(data.VarRegionIndex)
        data.Item = sorted(encoding.items)
        for minor, item in enumerate(data.Item):
            back_mapping[item] = (major << 16) + minor

    # Compile final mapping.
    varidx_map = {}
    for k, v in front_mapping.items():
        varidx_map[k] = back_mapping[v]

    # Remove unused regions.
    self.prune_regions()

    # Recalculate things and go home.
    self.VarRegionList.RegionCount = len(self.VarRegionList.Region)
    self.VarDataCount = len(self.VarData)
    for data in self.VarData:
        data.ItemCount = len(data.Item)
        data.optimize()

    return varidx_map

ot.VarStore.optimize = VarStore_optimize
def main(args=None):
    """Optimize a font's GDEF variation store"""
    from argparse import ArgumentParser
    from fontTools import configLogger
    from fontTools.ttLib import TTFont
    from fontTools.ttLib.tables.otBase import OTTableWriter

    parser = ArgumentParser(prog='varLib.varStore', description= main.__doc__)
    parser.add_argument('fontfile')
    parser.add_argument('outfile', nargs='?')
    options = parser.parse_args(args)

    # TODO: allow user to configure logging via command-line options
    configLogger(level="INFO")

    fontfile = options.fontfile
    outfile = options.outfile

    font = TTFont(fontfile)
    gdef = font['GDEF']
    store = gdef.table.VarStore

    # Measure the compiled size before optimizing, for the report below.
    writer = OTTableWriter()
    store.compile(writer, font)
    size = len(writer.getAllData())
    print("Before: %7d bytes" % size)

    varidx_map = store.optimize()

    # Rewrite every Device table's VarIdx into the optimized numbering.
    gdef.table.remap_device_varidxes(varidx_map)
    if 'GPOS' in font:
        font['GPOS'].table.remap_device_varidxes(varidx_map)

    writer = OTTableWriter()
    store.compile(writer, font)
    size = len(writer.getAllData())
    print("After: %7d bytes" % size)

    if outfile is not None:
        font.save(outfile)
if __name__ == "__main__":
    import sys
    if len(sys.argv) > 1:
        # Arguments given: run the optimizer CLI.
        sys.exit(main())
    # No arguments: run the module doctests instead.
    import doctest
    sys.exit(doctest.testmod().failed)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/varLib/varStore.py",
"copies": "3",
"size": "16627",
"license": "apache-2.0",
"hash": -8710384574531715000,
"line_mean": 27.1336717428,
"line_max": 104,
"alpha_frac": 0.7075239069,
"autogenerated": false,
"ratio": 3.0258416742493175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5233365581149318,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.roundTools import otRound
from fontTools import ttLib
from fontTools.misc.textTools import safeEval
from . import DefaultTable
import sys
import struct
import array
import logging
log = logging.getLogger(__name__)
class table__h_m_t_x(DefaultTable.DefaultTable):
    """Horizontal metrics table: per-glyph (advance width, left side bearing).

    Also serves as the base class for the vertical metrics table, which
    overrides the class attributes below.
    """

    headerTag = 'hhea'                        # table carrying numberOfHMetrics
    advanceName = 'width'
    sideBearingName = 'lsb'
    numberOfMetricsName = 'numberOfHMetrics'
    longMetricFormat = 'Hh'                   # unsigned advance, signed bearing

    def decompile(self, data, ttFont):
        numGlyphs = ttFont['maxp'].numGlyphs
        headerTable = ttFont.get(self.headerTag)
        if headerTable is not None:
            numberOfMetrics = int(getattr(headerTable, self.numberOfMetricsName))
        else:
            # No hhea/vhea available: assume one long metric per glyph.
            numberOfMetrics = numGlyphs
        if numberOfMetrics > numGlyphs:
            log.warning("The %s.%s exceeds the maxp.numGlyphs" % (
                self.headerTag, self.numberOfMetricsName))
            numberOfMetrics = numGlyphs
        if len(data) < 4 * numberOfMetrics:
            raise ttLib.TTLibError("not enough '%s' table data" % self.tableTag)
        # Note: advanceWidth is unsigned, but some font editors might
        # read/write as signed. We can't be sure whether it was a mistake
        # or not, so we read as unsigned but also issue a warning...
        metricsFmt = ">" + self.longMetricFormat * numberOfMetrics
        metrics = struct.unpack(metricsFmt, data[:4 * numberOfMetrics])
        data = data[4 * numberOfMetrics:]
        numberOfSideBearings = numGlyphs - numberOfMetrics
        sideBearings = array.array("h", data[:2 * numberOfSideBearings])
        data = data[2 * numberOfSideBearings:]

        if sys.byteorder != "big": sideBearings.byteswap()
        if data:
            log.warning("too much '%s' table data" % self.tableTag)
        self.metrics = {}
        glyphOrder = ttFont.getGlyphOrder()
        for i in range(numberOfMetrics):
            glyphName = glyphOrder[i]
            advanceWidth, lsb = metrics[i*2:i*2+2]
            if advanceWidth > 32767:
                log.warning(
                    "Glyph %r has a huge advance %s (%d); is it intentional or "
                    "an (invalid) negative value?", glyphName, self.advanceName,
                    advanceWidth)
            self.metrics[glyphName] = (advanceWidth, lsb)
        # Trailing glyphs all share the last explicit advance; only their
        # side bearings are stored in the table.
        lastAdvance = metrics[-2]
        for i in range(numberOfSideBearings):
            glyphName = glyphOrder[i + numberOfMetrics]
            self.metrics[glyphName] = (lastAdvance, sideBearings[i])

    def compile(self, ttFont):
        metrics = []
        hasNegativeAdvances = False
        for glyphName in ttFont.getGlyphOrder():
            advanceWidth, sideBearing = self.metrics[glyphName]
            if advanceWidth < 0:
                log.error("Glyph %r has negative advance %s" % (
                    glyphName, self.advanceName))
                hasNegativeAdvances = True
            metrics.append([advanceWidth, sideBearing])

        headerTable = ttFont.get(self.headerTag)
        if headerTable is not None:
            # Shrink the long-metrics array: trailing glyphs that share the
            # last advance are stored as bare side bearings.
            lastAdvance = metrics[-1][0]
            lastIndex = len(metrics)
            while metrics[lastIndex-2][0] == lastAdvance:
                lastIndex -= 1
                if lastIndex <= 1:
                    # all advances are equal
                    lastIndex = 1
                    break
            additionalMetrics = metrics[lastIndex:]
            additionalMetrics = [otRound(sb) for _, sb in additionalMetrics]
            metrics = metrics[:lastIndex]
            numberOfMetrics = len(metrics)
            setattr(headerTable, self.numberOfMetricsName, numberOfMetrics)
        else:
            # no hhea/vhea, can't store numberOfMetrics; assume == numGlyphs
            numberOfMetrics = ttFont["maxp"].numGlyphs
            additionalMetrics = []

        allMetrics = []
        for advance, sb in metrics:
            allMetrics.extend([otRound(advance), otRound(sb)])
        metricsFmt = ">" + self.longMetricFormat * numberOfMetrics
        try:
            data = struct.pack(metricsFmt, *allMetrics)
        except struct.error as e:
            if "out of range" in str(e) and hasNegativeAdvances:
                raise ttLib.TTLibError(
                    "'%s' table can't contain negative advance %ss"
                    % (self.tableTag, self.advanceName))
            else:
                raise

        additionalMetrics = array.array("h", additionalMetrics)
        if sys.byteorder != "big": additionalMetrics.byteswap()
        data = data + additionalMetrics.tobytes()

        return data

    def toXML(self, writer, ttFont):
        names = sorted(self.metrics.keys())
        for glyphName in names:
            advance, sb = self.metrics[glyphName]
            writer.simpletag("mtx", [
                ("name", glyphName),
                (self.advanceName, advance),
                (self.sideBearingName, sb),
            ])
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if not hasattr(self, "metrics"):
            self.metrics = {}
        if name == "mtx":
            self.metrics[attrs["name"]] = (safeEval(attrs[self.advanceName]),
                safeEval(attrs[self.sideBearingName]))

    def __delitem__(self, glyphName):
        del self.metrics[glyphName]

    def __getitem__(self, glyphName):
        return self.metrics[glyphName]

    def __setitem__(self, glyphName, advance_sb_pair):
        self.metrics[glyphName] = tuple(advance_sb_pair)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_h_m_t_x.py",
"copies": "5",
"size": "4618",
"license": "apache-2.0",
"hash": -8706608085830748000,
"line_mean": 32.4637681159,
"line_max": 72,
"alpha_frac": 0.708315288,
"autogenerated": false,
"ratio": 3.252112676056338,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.029027508341479824,
"num_lines": 138
} |
from fontTools.misc.roundTools import otRound
from fontTools.misc.transform import Transform
from fontTools.pens.filterPen import FilterPen, FilterPointPen
__all__ = ["RoundingPen", "RoundingPointPen"]
class RoundingPen(FilterPen):
    """
    Filter pen that rounds point coordinates and component XY offsets to integer.

    >>> from fontTools.pens.recordingPen import RecordingPen
    >>> recpen = RecordingPen()
    >>> roundpen = RoundingPen(recpen)
    >>> roundpen.moveTo((0.4, 0.6))
    >>> roundpen.lineTo((1.6, 2.5))
    >>> roundpen.qCurveTo((2.4, 4.6), (3.3, 5.7), (4.9, 6.1))
    >>> roundpen.curveTo((6.4, 8.6), (7.3, 9.7), (8.9, 10.1))
    >>> roundpen.addComponent("a", (1.5, 0, 0, 1.5, 10.5, -10.5))
    >>> recpen.value == [
    ...     ('moveTo', ((0, 1),)),
    ...     ('lineTo', ((2, 3),)),
    ...     ('qCurveTo', ((2, 5), (3, 6), (5, 6))),
    ...     ('curveTo', ((6, 9), (7, 10), (9, 10))),
    ...     ('addComponent', ('a', (1.5, 0, 0, 1.5, 11, -10))),
    ... ]
    True
    """

    def __init__(self, outPen, roundFunc=otRound):
        super().__init__(outPen)
        self.roundFunc = roundFunc

    def _roundPoint(self, pt):
        # Round both coordinates of a 2-tuple with the configured function.
        rnd = self.roundFunc
        return (rnd(pt[0]), rnd(pt[1]))

    def moveTo(self, pt):
        self._outPen.moveTo(self._roundPoint(pt))

    def lineTo(self, pt):
        self._outPen.lineTo(self._roundPoint(pt))

    def curveTo(self, *points):
        self._outPen.curveTo(*map(self._roundPoint, points))

    def qCurveTo(self, *points):
        self._outPen.qCurveTo(*map(self._roundPoint, points))

    def addComponent(self, glyphName, transformation):
        # Only the XY offset is rounded; the 2x2 part is passed through.
        xx, xy, yx, yy, dx, dy = transformation
        self._outPen.addComponent(
            glyphName,
            Transform(xx, xy, yx, yy, self.roundFunc(dx), self.roundFunc(dy)),
        )
class RoundingPointPen(FilterPointPen):
    """
    Filter point pen that rounds point coordinates and component XY offsets to integer.

    >>> from fontTools.pens.recordingPen import RecordingPointPen
    >>> recpen = RecordingPointPen()
    >>> roundpen = RoundingPointPen(recpen)
    >>> roundpen.beginPath()
    >>> roundpen.addPoint((0.4, 0.6), 'line')
    >>> roundpen.addPoint((1.6, 2.5), 'line')
    >>> roundpen.addPoint((2.4, 4.6))
    >>> roundpen.addPoint((3.3, 5.7))
    >>> roundpen.addPoint((4.9, 6.1), 'qcurve')
    >>> roundpen.endPath()
    >>> roundpen.addComponent("a", (1.5, 0, 0, 1.5, 10.5, -10.5))
    >>> recpen.value == [
    ...     ('beginPath', (), {}),
    ...     ('addPoint', ((0, 1), 'line', False, None), {}),
    ...     ('addPoint', ((2, 3), 'line', False, None), {}),
    ...     ('addPoint', ((2, 5), None, False, None), {}),
    ...     ('addPoint', ((3, 6), None, False, None), {}),
    ...     ('addPoint', ((5, 6), 'qcurve', False, None), {}),
    ...     ('endPath', (), {}),
    ...     ('addComponent', ('a', (1.5, 0, 0, 1.5, 11, -10)), {}),
    ... ]
    True
    """

    def __init__(self, outPen, roundFunc=otRound):
        super().__init__(outPen)
        self.roundFunc = roundFunc

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
        rnd = self.roundFunc
        self._outPen.addPoint(
            (rnd(pt[0]), rnd(pt[1])),
            segmentType=segmentType,
            smooth=smooth,
            name=name,
            **kwargs,
        )

    def addComponent(self, baseGlyphName, transformation, **kwargs):
        # Only the XY offset is rounded; the 2x2 part is passed through.
        xx, xy, yx, yy, dx, dy = transformation
        self._outPen.addComponent(
            baseGlyphName,
            Transform(xx, xy, yx, yy, self.roundFunc(dx), self.roundFunc(dy)),
            **kwargs,
        )
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/pens/roundingPen.py",
"copies": "5",
"size": "3803",
"license": "apache-2.0",
"hash": 7474128881076738000,
"line_mean": 32.9553571429,
"line_max": 87,
"alpha_frac": 0.5308966605,
"autogenerated": false,
"ratio": 3.2728055077452667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6303702168245267,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.textTools import safeEval
from . import DefaultTable
import struct
import array
# XXX I've lowered the strictness, to make sure Apple's own Chicago
# XXX gets through. They're looking into it, I hope to raise the standards
# XXX back to normal eventually.

class table_L_T_S_H_(DefaultTable.DefaultTable):
    """Linear Threshold table: per-glyph ppem at which rendering turns linear."""

    def decompile(self, data, ttFont):
        version, numGlyphs = struct.unpack(">HH", data[:4])
        data = data[4:]
        assert version == 0, "unknown version: %s" % version
        # Lenient: tolerates up to 3 trailing bytes (see note above).
        assert (len(data) % numGlyphs) < 4, "numGlyphs doesn't match data length"
        # ouch: the assertion is not true in Chicago!
        #assert numGlyphs == ttFont['maxp'].numGlyphs
        yPels = array.array("B")
        yPels.frombytes(data)
        self.yPels = {}  # glyph name -> yPel byte
        for i in range(numGlyphs):
            self.yPels[ttFont.getGlyphName(i)] = yPels[i]

    def compile(self, ttFont):
        version = 0
        names = list(self.yPels.keys())
        numGlyphs = len(names)
        yPels = [0] * numGlyphs
        # ouch: the assertion is not true in Chicago!
        #assert len(self.yPels) == ttFont['maxp'].numGlyphs == numGlyphs
        for name in names:
            yPels[ttFont.getGlyphID(name)] = self.yPels[name]
        yPels = array.array("B", yPels)
        return struct.pack(">HH", version, numGlyphs) + yPels.tobytes()

    def toXML(self, writer, ttFont):
        names = sorted(self.yPels.keys())
        for name in names:
            writer.simpletag("yPel", name=name, value=self.yPels[name])
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if not hasattr(self, "yPels"):
            self.yPels = {}
        if name != "yPel":
            return  # ignore unknown tags
        self.yPels[attrs["name"]] = safeEval(attrs["value"])
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/L_T_S_H_.py",
"copies": "5",
"size": "1613",
"license": "apache-2.0",
"hash": 1923461349534792400,
"line_mean": 32.6041666667,
"line_max": 75,
"alpha_frac": 0.6924984501,
"autogenerated": false,
"ratio": 2.8248686514886163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6017367101588617,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.textTools import safeEval
from . import DefaultTable
import struct
# rangeGaspBehavior flag bits; the symmetric flags require table version 1.
GASP_SYMMETRIC_GRIDFIT = 0x0004
GASP_SYMMETRIC_SMOOTHING = 0x0008
GASP_DOGRAY = 0x0002
GASP_GRIDFIT = 0x0001


class table__g_a_s_p(DefaultTable.DefaultTable):
    """Grid-fitting And Scan-conversion Procedure table."""

    def decompile(self, data, ttFont):
        self.version, numRanges = struct.unpack(">HH", data[:4])
        assert 0 <= self.version <= 1, "unknown 'gasp' format: %s" % self.version
        data = data[4:]
        self.gaspRange = {}  # rangeMaxPPEM -> rangeGaspBehavior flags
        for i in range(numRanges):
            rangeMaxPPEM, rangeGaspBehavior = struct.unpack(">HH", data[:4])
            self.gaspRange[int(rangeMaxPPEM)] = int(rangeGaspBehavior)
            data = data[4:]
        assert not data, "too much data"

    def compile(self, ttFont):
        version = 0  # ignore self.version
        numRanges = len(self.gaspRange)
        data = b""
        items = sorted(self.gaspRange.items())
        for rangeMaxPPEM, rangeGaspBehavior in items:
            data = data + struct.pack(">HH", rangeMaxPPEM, rangeGaspBehavior)
            if rangeGaspBehavior & ~(GASP_GRIDFIT | GASP_DOGRAY):
                # Symmetric flags in use: must write a version-1 table.
                version = 1
        data = struct.pack(">HH", version, numRanges) + data
        return data

    def toXML(self, writer, ttFont):
        items = sorted(self.gaspRange.items())
        for rangeMaxPPEM, rangeGaspBehavior in items:
            writer.simpletag("gaspRange", [
                ("rangeMaxPPEM", rangeMaxPPEM),
                ("rangeGaspBehavior", rangeGaspBehavior)])
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name != "gaspRange":
            return
        if not hasattr(self, "gaspRange"):
            self.gaspRange = {}
        self.gaspRange[safeEval(attrs["rangeMaxPPEM"])] = safeEval(attrs["rangeGaspBehavior"])
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_g_a_s_p.py",
"copies": "5",
"size": "1585",
"license": "apache-2.0",
"hash": 5047191961928382000,
"line_mean": 31.3469387755,
"line_max": 88,
"alpha_frac": 0.7059936909,
"autogenerated": false,
"ratio": 2.8354203935599283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6041414084459928,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.textTools import safeEval
from . import DefaultTable
import sys
import array
class table__c_v_t(DefaultTable.DefaultTable):
    """Control Value Table: a flat list of signed 16-bit values used by hints."""

    def decompile(self, data, ttFont):
        values = array.array("h")
        values.frombytes(data)
        # Table data is big-endian; swap on little-endian hosts.
        if sys.byteorder != "big": values.byteswap()
        self.values = values

    def compile(self, ttFont):
        # Work on a copy so byteswap doesn't mutate self.values.
        values = self.values[:]
        if sys.byteorder != "big": values.byteswap()
        return values.tobytes()

    def toXML(self, writer, ttFont):
        for i in range(len(self.values)):
            value = self.values[i]
            writer.simpletag("cv", value=value, index=i)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if not hasattr(self, "values"):
            self.values = array.array("h")
        if name == "cv":
            index = safeEval(attrs["index"])
            value = safeEval(attrs["value"])
            # Grow the array with zeros up to `index` if needed; entries may
            # arrive out of order.
            for i in range(1 + index - len(self.values)):
                self.values.append(0)
            self.values[index] = value

    def __len__(self):
        return len(self.values)

    def __getitem__(self, index):
        return self.values[index]

    def __setitem__(self, index, value):
        self.values[index] = value

    def __delitem__(self, index):
        del self.values[index]
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_c_v_t.py",
"copies": "5",
"size": "1145",
"license": "apache-2.0",
"hash": 2425795897745965000,
"line_mean": 24.4444444444,
"line_max": 49,
"alpha_frac": 0.6759825328,
"autogenerated": false,
"ratio": 3.0291005291005293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6205083061900529,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.transform import Transform
from defcon.objects.base import BaseObject
_defaultTransformation = (1, 0, 0, 1, 0, 0)
class Component(BaseObject):
"""
This object represents a reference to another glyph.
**This object posts the following notifications:**
========================== ====
Name Note
========================== ====
Component.Changed Posted when the *dirty* attribute is set.
Component.BaseGlyphChanged Posted when the *baseGlyph* attribute is set.
========================== ====
"""
changeNotificationName = "Component.Changed"
def __init__(self):
super(Component, self).__init__()
self._dirty = False
self._baseGlyph = None
self._transformation = tuple(_defaultTransformation)
self._boundsCache = None
self._controlPointBoundsCache = None
# ----------
# Attributes
# ----------
def _getBounds(self, boundsPen):
glyph = self.getParent()
if glyph is None:
return None
font = glyph.getParent()
if font is None:
return None
if self.baseGlyph not in font:
return None
pen = boundsPen(font)
self.draw(pen)
return pen.bounds
def _get_bounds(self):
from robofab.pens.boundsPen import BoundsPen
if self._boundsCache is None:
self._boundsCache = self._getBounds(BoundsPen)
return self._boundsCache
bounds = property(_get_bounds, doc="The bounds of the components's outline expressed as a tuple of form (xMin, yMin, xMax, yMax).")
def _get_controlPointBounds(self):
from fontTools.pens.boundsPen import ControlBoundsPen
if self._controlPointBoundsCache is None:
self._controlPointBoundsCache = self._getBounds(ControlBoundsPen)
return self._controlPointBoundsCache
controlPointBounds = property(_get_controlPointBounds, doc="The control bounds of all points in the components. This only measures the point positions, it does not measure curves. So, curves without points at the extrema will not be properly measured.")
def _set_baseGlyph(self, value):
oldValue = self._baseGlyph
self._baseGlyph = value
self._destroyBoundsCache()
self.dirty = True
dispatcher = self.dispatcher
if dispatcher is not None:
dispatcher.postNotification(notification="Component.BaseGlyphChanged", observable=self, data=(oldValue, value))
def _get_baseGlyph(self):
return self._baseGlyph
baseGlyph = property(_get_baseGlyph, _set_baseGlyph, doc="The glyph that the components references. Setting this will post *Component.BaseGlyphChanged* and *Component.Changed* notifications.")
def _set_transformation(self, value):
self._transformation = value
self._destroyBoundsCache()
self.dirty = True
def _get_transformation(self):
return self._transformation
transformation = property(_get_transformation, _set_transformation, doc="The transformation matrix for the component. Setting this will posts a *Component.Changed* notification.")
# -----------
# Pen Methods
# -----------
def draw(self, pen):
"""
Draw the component with **pen**.
"""
from robofab.pens.adapterPens import PointToSegmentPen
pointPen = PointToSegmentPen(pen)
self.drawPoints(pointPen)
def drawPoints(self, pointPen):
"""
Draw the component with **pointPen**.
"""
pointPen.addComponent(self._baseGlyph, self._transformation)
# -------
# Methods
# -------
def move(self, (x, y)):
"""
Move the component by **(x, y)**.
This posts a *Component.Changed* notification.
"""
xScale, xyScale, yxScale, yScale, xOffset, yOffset = self._transformation
xOffset += x
yOffset += y
self.transformation = (xScale, xyScale, yxScale, yScale, xOffset, yOffset)
def pointInside(self, (x, y), evenOdd=False):
"""
Returns a boolean indicating if **(x, y)** is in the
"black" area of the component.
"""
from fontTools.pens.pointInsidePen import PointInsidePen
glyph = self.getParent()
if glyph is None:
return False
font = self.getParent()
if font is None:
return False
pen = PointInsidePen(glyphSet=font, testPoint=(x, y), evenOdd=evenOdd)
self.draw(pen)
return pen.getResult()
# ----
# Undo
# ----
def getDataToSerializeForUndo(self):
data = dict(
baseGlyph=self.baseGlyph,
transformation=self.transformation
)
return data
def loadDeserializedDataFromUndo(self, data):
self.baseGlyph = data["baseGlyph"]
self.transformation = data["transformation"]
def _destroyBoundsCache(self):
self._boundsCache = None
self._controlPointBoundsCache = None
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| {
"repo_name": "typemytype/defcon",
"path": "Lib/defcon/objects/component.py",
"copies": "1",
"size": "5191",
"license": "mit",
"hash": -6486660219302099000,
"line_mean": 31.0432098765,
"line_max": 257,
"alpha_frac": 0.6077827008,
"autogenerated": false,
"ratio": 4.37689713322091,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005918921045025986,
"num_lines": 162
} |
from fontTools.misc.transform import Transform
from ufo2ft.filters import BaseFilter
import logging
logger = logging.getLogger(__name__)
class FlattenComponentsFilter(BaseFilter):
    """Filter that replaces nested components with direct references to the
    deepest (contour-bearing) base glyphs.
    """

    def __call__(self, font, glyphSet=None):
        if super(FlattenComponentsFilter, self).__call__(font, glyphSet):
            modified = self.context.modified
            if modified:
                logger.info("Flattened composite glyphs: %i" % len(modified))
            return modified

    def filter(self, glyph):
        if not glyph.components:
            return False
        changed = False
        pen = glyph.getPen()
        for component in list(glyph.components):
            flat = _flattenComponent(self.context.glyphSet, component)
            # If flattening produced exactly the original reference, the
            # component was already flat; leave it alone.
            if flat[0] == (component.baseGlyph, component.transformation):
                continue
            changed = True
            glyph.removeComponent(component)
            for entry in flat:
                pen.addComponent(*entry)
        if changed:
            self.context.modified.add(glyph.name)
        return changed
def _flattenComponent(glyphSet, component):
    """Returns a list of tuples (baseGlyph, transform) of nested component."""
    base = glyphSet[component.baseGlyph]
    if not base.components:
        # Leaf component: carry its own transformation unchanged.
        return [(component.baseGlyph, Transform(*component.transformation))]
    flattened = []
    for child in base.components:
        for baseName, childTransform in _flattenComponent(glyphSet, child):
            # Compose the child's accumulated transform with this
            # component's transformation.
            flattened.append((baseName, childTransform.transform(component.transformation)))
    return flattened
| {
"repo_name": "jamesgk/ufo2ft",
"path": "Lib/ufo2ft/filters/flattenComponents.py",
"copies": "1",
"size": "1836",
"license": "mit",
"hash": 1626815989282869200,
"line_mean": 35.72,
"line_max": 78,
"alpha_frac": 0.6574074074,
"autogenerated": false,
"ratio": 4.601503759398496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5758911166798497,
"avg_score": null,
"num_lines": null
} |
from fontTools.misc.transform import Transform
from ufo2ft.filters import BaseFilter
import logging
logger = logging.getLogger(__name__)
class FlattenComponentsFilter(BaseFilter):
    """Filter that rewrites composite glyphs so every component points
    directly at a contour-bearing base glyph.
    """

    def __call__(self, font, glyphSet=None):
        if super(FlattenComponentsFilter, self).__call__(font, glyphSet):
            modified = self.context.modified
            if modified:
                logger.info('Flattened composite glyphs: %i' % len(modified))
            return modified

    def filter(self, glyph):
        if not glyph.components:
            return False
        didFlatten = False
        pen = glyph.getPen()
        for comp in list(glyph.components):
            resolved = _flattenComponent(self.context.glyphSet, comp)
            # Only rewrite when flattening actually changed the reference.
            if resolved[0] != (comp.baseGlyph, comp.transformation):
                didFlatten = True
                glyph.removeComponent(comp)
                for baseName_transform in resolved:
                    pen.addComponent(*baseName_transform)
        if didFlatten:
            self.context.modified.add(glyph.name)
        return didFlatten
def _flattenComponent(glyphSet, component):
    """Returns a list of tuples (baseGlyph, transform) of nested component."""
    referenced = glyphSet[component.baseGlyph]
    if not referenced.components:
        # Base glyph has no components of its own: stop recursing.
        return [(component.baseGlyph, Transform(*component.transformation))]
    collected = []
    for inner in referenced.components:
        for name, accumulated in _flattenComponent(glyphSet, inner):
            # Fold this component's transformation into the accumulated one.
            collected.append((name, accumulated.transform(component.transformation)))
    return collected
| {
"repo_name": "jamesgk/ufo2fdk",
"path": "Lib/ufo2ft/filters/flattenComponents.py",
"copies": "2",
"size": "1865",
"license": "mit",
"hash": 736537670217299000,
"line_mean": 34.8653846154,
"line_max": 78,
"alpha_frac": 0.6471849866,
"autogenerated": false,
"ratio": 4.6625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6309684986599999,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.basePen import AbstractPen, BasePen
from robofab.misc.bezierTools import splitLine, splitCubic
from sets import Set
class MarginPen(BasePen):

    """
    Pen to calculate the margins at a given value.

    When isHorizontal is True, the margins at <value> are horizontal.
    When isHorizontal is False, the margins at <value> are vertical.

    When a glyphset or font is given, MarginPen will also calculate for glyphs with components.

    pen.getMargins() returns the minimum and maximum intersections of the glyph.
    pen.getContourMargins() returns the minimum and maximum intersections for each contour.

    Possible optimisation:
    Initialise the pen object with a list of points we want to measure,
    then draw the glyph once, but do the splitLine() math for all measure points.
    """

    def __init__(self, glyphSet, value, isHorizontal=True):
        BasePen.__init__(self, glyphSet)
        self.value = value            # coordinate of the measurement line
        self.hits = {}                # contour index -> list of intersection values
        self.filterDoubles = True     # skip zero-length line segments
        self.contourIndex = None
        self.startPt = None
        self.isHorizontal = isHorizontal

    def _addHit(self, value):
        # Record one intersection for the contour currently being drawn.
        # (Factored out of _lineTo/_curveToOne which previously repeated
        # this setdefault-style dance five times.)
        if self.contourIndex not in self.hits:
            self.hits[self.contourIndex] = []
        self.hits[self.contourIndex].append(value)

    def _moveTo(self, pt):
        self.currentPt = pt
        self.startPt = pt
        if self.contourIndex is None:
            self.contourIndex = 0
        else:
            self.contourIndex += 1

    def _lineTo(self, pt):
        if self.filterDoubles:
            if pt == self.currentPt:
                return
        hits = splitLine(self.currentPt, pt, self.value, self.isHorizontal)
        if len(hits) > 1:
            # splitLine returned two sub-segments; the last point of the
            # first one is the intersection with the measurement line.
            if self.isHorizontal:
                self._addHit(round(hits[0][-1][0], 4))
            else:
                self._addHit(round(hits[0][-1][1], 4))
        if self.isHorizontal and pt[1] == self.value:
            # The segment ends exactly on the measurement line.
            self._addHit(pt[0])
        elif (not self.isHorizontal) and (pt[0] == self.value):
            self._addHit(pt[1])
        self.currentPt = pt

    def _curveToOne(self, pt1, pt2, pt3):
        hits = splitCubic(self.currentPt, pt1, pt2, pt3, self.value, self.isHorizontal)
        for i in range(len(hits) - 1):
            # A number of intersections is possible. Just take the
            # last point of each sub-segment.
            if self.isHorizontal:
                self._addHit(round(hits[i][-1][0], 4))
            else:
                self._addHit(round(hits[i][-1][1], 4))
        if self.isHorizontal and pt3[1] == self.value:
            # The curve ends exactly on the measurement line.
            self._addHit(pt3[0])
        if (not self.isHorizontal) and (pt3[0] == self.value):
            self._addHit(pt3[1])
        self.currentPt = pt3

    def _closePath(self):
        # Close the contour with an implicit line back to the start point.
        if self.currentPt != self.startPt:
            self._lineTo(self.startPt)
        self.currentPt = self.startPt = None

    def _endPath(self):
        self.currentPt = None

    def addComponent(self, baseGlyph, transformation):
        from fontTools.pens.transformPen import TransformPen
        if self.glyphSet is None:
            return
        if baseGlyph in self.glyphSet:
            glyph = self.glyphSet[baseGlyph]
            if glyph is None:
                return
            # Draw the referenced glyph through a transforming pen so its
            # intersections are recorded in this pen's coordinate space.
            tPen = TransformPen(self, transformation)
            glyph.draw(tPen)

    def getMargins(self):
        """Get the extreme intersections for all contours combined, i.e. the whole glyph.

        Returns (min, max), or None when no intersections were found.
        """
        allHits = []
        for pts in self.hits.values():
            allHits.extend(pts)
        if allHits:
            return min(allHits), max(allHits)
        return None

    def getContourMargins(self):
        """Get the sorted, de-duplicated intersections for each contour."""
        # builtin set replaces the deprecated sets.Set (removed in Python 3);
        # behavior is identical.
        allHits = {}
        for index, pts in self.hits.items():
            allHits[index] = sorted(set(pts))
        return allHits

    def getAll(self):
        """Get all the slices, de-duplicated and sorted."""
        allHits = []
        for pts in self.hits.values():
            allHits.extend(pts)
        return sorted(set(allHits))
if __name__ == "__main__":
    # Ad-hoc manual test: requires a font and glyph open in a RoboFab
    # environment. NOTE: uses Python 2 print statements; this module
    # predates Python 3.
    from robofab.world import CurrentGlyph, CurrentFont
    f = CurrentFont()
    g = CurrentGlyph()
    pt = (74, 216)
    # Measure vertical intersections at x == pt[1].
    pen = MarginPen(f, pt[1], isHorizontal=False)
    g.draw(pen)
    print 'glyph Y margins', pen.getMargins()
    print pen.getContourMargins()
| {
"repo_name": "moyogo/robofab",
"path": "Lib/robofab/pens/marginPen.py",
"copies": "9",
"size": "4833",
"license": "bsd-3-clause",
"hash": 5928203480065633000,
"line_mean": 29.1806451613,
"line_max": 93,
"alpha_frac": 0.6741154562,
"autogenerated": false,
"ratio": 3.1423927178153446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8316508174015345,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.basePen import AbstractPen
from fontTools.pens.pointPen import AbstractPointPen
from fontTools.pens.recordingPen import RecordingPen
class _PassThruComponentsMixin(object):
def addComponent(self, glyphName, transformation, **kwargs):
self._outPen.addComponent(glyphName, transformation, **kwargs)
class FilterPen(_PassThruComponentsMixin, AbstractPen):

    """ Base class for pens that apply some transformation to the coordinates
    they receive and pass them to another pen.

    You can override any of its methods. The default implementation does
    nothing, but passes the commands unmodified to the other pen.

    >>> from fontTools.pens.recordingPen import RecordingPen
    >>> rec = RecordingPen()
    >>> pen = FilterPen(rec)
    >>> v = iter(rec.value)

    >>> pen.moveTo((0, 0))
    >>> next(v)
    ('moveTo', ((0, 0),))

    >>> pen.lineTo((1, 1))
    >>> next(v)
    ('lineTo', ((1, 1),))

    >>> pen.curveTo((2, 2), (3, 3), (4, 4))
    >>> next(v)
    ('curveTo', ((2, 2), (3, 3), (4, 4)))

    >>> pen.qCurveTo((5, 5), (6, 6), (7, 7), (8, 8))
    >>> next(v)
    ('qCurveTo', ((5, 5), (6, 6), (7, 7), (8, 8)))

    >>> pen.closePath()
    >>> next(v)
    ('closePath', ())

    >>> pen.moveTo((9, 9))
    >>> next(v)
    ('moveTo', ((9, 9),))

    >>> pen.endPath()
    >>> next(v)
    ('endPath', ())

    >>> pen.addComponent('foo', (1, 0, 0, 1, 0, 0))
    >>> next(v)
    ('addComponent', ('foo', (1, 0, 0, 1, 0, 0)))
    """

    def __init__(self, outPen):
        # The wrapped pen; every drawing command is forwarded to it.
        self._outPen = outPen

    # Each method below simply delegates to the wrapped pen; subclasses
    # override the ones they want to transform.

    def moveTo(self, pt):
        out = self._outPen
        out.moveTo(pt)

    def lineTo(self, pt):
        out = self._outPen
        out.lineTo(pt)

    def curveTo(self, *points):
        out = self._outPen
        out.curveTo(*points)

    def qCurveTo(self, *points):
        out = self._outPen
        out.qCurveTo(*points)

    def closePath(self):
        out = self._outPen
        out.closePath()

    def endPath(self):
        out = self._outPen
        out.endPath()
class ContourFilterPen(_PassThruComponentsMixin, RecordingPen):
    """A "buffered" filter pen: contour commands accumulate in ``self.value``
    (via RecordingPen), are passed through the ``filterContour`` hook when the
    contour is closed or ended, and the result is replayed onto the output
    pen. Components are forwarded unchanged.
    """

    def __init__(self, outPen):
        super(ContourFilterPen, self).__init__()
        self._outPen = outPen

    def closePath(self):
        super(ContourFilterPen, self).closePath()
        self._flushContour()

    def endPath(self):
        super(ContourFilterPen, self).endPath()
        self._flushContour()

    def _flushContour(self):
        filtered = self.filterContour(self.value)
        if filtered is not None:
            # The hook produced a replacement contour; draw that instead.
            self.value = filtered
        self.replay(self._outPen)
        # Reset the buffer for the next contour.
        self.value = []

    def filterContour(self, contour):
        """Subclasses must override this to perform the filtering.

        The contour is a list of pen (operator, operands) tuples.
        Operators are strings corresponding to the AbstractPen methods:
        "moveTo", "lineTo", "curveTo", "qCurveTo", "closePath" and
        "endPath". The operands are the positional arguments that are
        passed to each method.

        If the method doesn't return a value (i.e. returns None), it's
        assumed that the argument was modified in-place.
        Otherwise, the return value is drawn with the output pen.
        """
        return  # or return contour
class FilterPointPen(_PassThruComponentsMixin, AbstractPointPen):

    """ Baseclass for point pens that apply some transformation to the
    coordinates they receive and pass them to another point pen.

    You can override any of its methods. The default implementation does
    nothing, but passes the commands unmodified to the other pen.

    >>> from fontTools.pens.recordingPen import RecordingPointPen
    >>> rec = RecordingPointPen()
    >>> pen = FilterPointPen(rec)
    >>> v = iter(rec.value)
    >>> pen.beginPath(identifier="abc")
    >>> next(v)
    ('beginPath', (), {'identifier': 'abc'})
    >>> pen.addPoint((1, 2), "line", False)
    >>> next(v)
    ('addPoint', ((1, 2), 'line', False, None), {})
    >>> pen.addComponent("a", (2, 0, 0, 2, 10, -10), identifier="0001")
    >>> next(v)
    ('addComponent', ('a', (2, 0, 0, 2, 10, -10)), {'identifier': '0001'})
    >>> pen.endPath()
    >>> next(v)
    ('endPath', (), {})
    """

    def __init__(self, outPointPen):
        # The wrapped point pen; all commands are forwarded to it.
        self._outPen = outPointPen

    def beginPath(self, **kwargs):
        out = self._outPen
        out.beginPath(**kwargs)

    def endPath(self):
        out = self._outPen
        out.endPath()

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
        out = self._outPen
        out.addPoint(pt, segmentType, smooth, name, **kwargs)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/pens/filterPen.py",
"copies": "5",
"size": "4732",
"license": "apache-2.0",
"hash": -7731379216678571000,
"line_mean": 28.9493670886,
"line_max": 80,
"alpha_frac": 0.6077768385,
"autogenerated": false,
"ratio": 3.6012176560121767,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6708994494512177,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.basePen import AbstractPen
from ufoLib.pointPen import AbstractPointPen
__all__ = ["AbstractPointPen", "BasePointToSegmentPen", "PrintingPointPen",
"PrintingSegmentPen", "SegmentPrintingPointPen"]
class BasePointToSegmentPen(AbstractPointPen):
    """Base class for retrieving the outline in a segment-oriented
    way. The PointPen protocol is simple yet also a little tricky,
    so when you need an outline presented as segments but you have
    as points, do use this base implementation as it properly takes
    care of all the edge cases.
    """

    def __init__(self):
        # Buffer of (pt, segmentType, smooth, name, kwargs) tuples for the
        # contour currently being built; None between contours.
        self.currentPath = None

    def beginPath(self, **kwargs):
        assert self.currentPath is None
        self.currentPath = []

    def _flushContour(self, segments):
        """Override this method.

        It will be called for each non-empty sub path with a list
        of segments: the 'segments' argument.

        The segments list contains tuples of length 2:
            (segmentType, points)

        segmentType is one of "move", "line", "curve" or "qcurve".
        "move" may only occur as the first segment, and it signifies
        an OPEN path. A CLOSED path does NOT start with a "move", in
        fact it will not contain a "move" at ALL.

        The 'points' field in the 2-tuple is a list of point info
        tuples. The list has 1 or more items, a point tuple has
        four items:
            (point, smooth, name, kwargs)
        'point' is an (x, y) coordinate pair.

        For a closed path, the initial moveTo point is defined as
        the last point of the last segment.

        The 'points' list of "move" and "line" segments always contains
        exactly one point tuple.
        """
        raise NotImplementedError

    def endPath(self):
        assert self.currentPath is not None
        points = self.currentPath
        self.currentPath = None
        if not points:
            # Empty contour: nothing to flush.
            return
        if len(points) == 1:
            # Not much more we can do than output a single move segment.
            pt, segmentType, smooth, name, kwargs = points[0]
            segments = [("move", [(pt, smooth, name, kwargs)])]
            self._flushContour(segments)
            return
        segments = []
        if points[0][1] == "move":
            # It's an open contour, insert a "move" segment for the first
            # point and remove that first point from the point list.
            pt, segmentType, smooth, name, kwargs = points[0]
            segments.append(("move", [(pt, smooth, name, kwargs)]))
            points.pop(0)
        else:
            # It's a closed contour. Locate the first on-curve point, and
            # rotate the point list so that it _ends_ with an on-curve
            # point.
            firstOnCurve = None
            for i in range(len(points)):
                segmentType = points[i][1]
                if segmentType is not None:
                    firstOnCurve = i
                    break
            if firstOnCurve is None:
                # Special case for quadratics: a contour with no on-curve
                # points. Add a "None" point. (See also the Pen protocol's
                # qCurveTo() method and fontTools.pens.basePen.py.)
                points.append((None, "qcurve", None, None, None))
            else:
                # Rotate so the list ends with the first on-curve point.
                points = points[firstOnCurve+1:] + points[:firstOnCurve+1]

        # Group points into segments: off-curve points (segmentType None)
        # accumulate until an on-curve point terminates the segment.
        currentSegment = []
        for pt, segmentType, smooth, name, kwargs in points:
            currentSegment.append((pt, smooth, name, kwargs))
            if segmentType is None:
                continue
            segments.append((segmentType, currentSegment))
            currentSegment = []

        self._flushContour(segments)

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
        self.currentPath.append((pt, segmentType, smooth, name, kwargs))
class PrintingPointPen(AbstractPointPen):
    """Point pen that prints each call it receives; useful for debugging."""

    def __init__(self):
        self.havePath = False

    @staticmethod
    def _tailArgs(identifier, kwargs):
        # Format the trailing identifier / **kwargs arguments shared by
        # beginPath, addPoint and addComponent.
        parts = []
        if identifier is not None:
            parts.append("identifier=%r" % identifier)
        if kwargs:
            parts.append("**%s" % kwargs)
        return parts

    def beginPath(self, identifier=None, **kwargs):
        self.havePath = True
        args = self._tailArgs(identifier, kwargs)
        print("pen.beginPath(%s)" % ", ".join(args))

    def endPath(self):
        self.havePath = False
        print("pen.endPath()")

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
        assert self.havePath
        args = ["(%s, %s)" % (pt[0], pt[1])]
        if segmentType is not None:
            args.append("segmentType=%r" % segmentType)
        if smooth:
            args.append("smooth=True")
        if name is not None:
            args.append("name=%r" % name)
        args.extend(self._tailArgs(identifier, kwargs))
        print("pen.addPoint(%s)" % ", ".join(args))

    def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs):
        assert not self.havePath
        args = [baseGlyphName, str(tuple(transformation))]
        args.extend(self._tailArgs(identifier, kwargs))
        print("pen.addComponent(%s)" % ", ".join(args))
class PrintingSegmentPen(AbstractPen):
    """Segment pen that prints each call it receives; useful for debugging."""

    def moveTo(self, pt):
        print("pen.moveTo({0})".format(pt))

    def lineTo(self, pt):
        print("pen.lineTo({0})".format(pt))

    def curveTo(self, *pts):
        print("pen.curveTo{0}".format(pts))

    def qCurveTo(self, *pts):
        print("pen.qCurveTo{0}".format(pts))

    def closePath(self):
        print("pen.closePath()")

    def endPath(self):
        print("pen.endPath()")

    def addComponent(self, baseGlyphName, transformation):
        print("pen.addComponent({0!r}, {1})".format(baseGlyphName, tuple(transformation)))
class SegmentPrintingPointPen(BasePointToSegmentPen):
    # Debugging pen: collects points into segments (via the base class)
    # and pretty-prints each flushed contour's segment list.
    def _flushContour(self, segments):
        from pprint import pprint
        pprint(segments)
if __name__ == "__main__":
    # Manual smoke test; requires robofab to be importable.
    p = SegmentPrintingPointPen()
    from robofab.test.test_pens import TestShapes
    TestShapes.onCurveLessQuadShape(p)
| {
"repo_name": "adrientetar/robofab",
"path": "Lib/robofab/pens/pointPen.py",
"copies": "1",
"size": "5374",
"license": "bsd-3-clause",
"hash": -8305641667661883000,
"line_mean": 30.2441860465,
"line_max": 94,
"alpha_frac": 0.6924078898,
"autogenerated": false,
"ratio": 3.208358208955224,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4400766098755224,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.basePen import AbstractPen
__all__ = ["TransformPen"]
class TransformPen(AbstractPen):

    """Pen that transforms all coordinates using a Affine transformation,
    and passes them to another pen.
    """

    def __init__(self, outPen, transformation):
        """The 'outPen' argument is another pen object. It will receive the
        transformed coordinates. The 'transformation' argument can either
        be a six-tuple, or a fontTools.misc.transform.Transform object.
        """
        if not hasattr(transformation, "transformPoint"):
            from fontTools.misc.transform import Transform
            transformation = Transform(*transformation)
        self._transformation = transformation
        self._transformPoint = transformation.transformPoint
        self._outPen = outPen
        # NOTE(review): _stack appears unused in this class; kept for
        # backward compatibility with any external code that pokes at it.
        self._stack = []

    def moveTo(self, pt):
        self._outPen.moveTo(self._transformPoint(pt))

    def lineTo(self, pt):
        self._outPen.lineTo(self._transformPoint(pt))

    def curveTo(self, *points):
        self._outPen.curveTo(*self._transformPoints(points))

    def qCurveTo(self, *points):
        if points[-1] is None:
            # Special TrueType form: a closed qCurve with no on-curve point
            # ends with None, which must not be transformed.
            points = self._transformPoints(points[:-1]) + [None]
        else:
            points = self._transformPoints(points)
        self._outPen.qCurveTo(*points)

    def _transformPoints(self, points):
        # Transform a sequence of points, returning a list.
        new = []
        transformPoint = self._transformPoint
        for pt in points:
            new.append(transformPoint(pt))
        return new

    def closePath(self):
        self._outPen.closePath()

    def endPath(self):
        # Bug fix: open paths must be forwarded too. Without this method,
        # drawing an open contour fell through to AbstractPen.endPath and
        # raised NotImplementedError.
        self._outPen.endPath()

    def addComponent(self, glyphName, transformation):
        # Compose this pen's transform with the component's own transform.
        transformation = self._transformation.transform(transformation)
        self._outPen.addComponent(glyphName, transformation)
if __name__ == "__main__":
    # Manual smoke test: print the transformed commands for a small contour.
    from fontTools.pens.basePen import _TestPen
    pen = TransformPen(_TestPen(None), (2, 0, 0.5, 2, -10, 0))
    pen.moveTo((0, 0))
    pen.lineTo((0, 100))
    pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0))
    pen.closePath()
| {
"repo_name": "shadowmint/nwidget",
"path": "lib/fonttools-2.3/Lib/fontTools/pens/transformPen.py",
"copies": "2",
"size": "1812",
"license": "apache-2.0",
"hash": -1308408506856271600,
"line_mean": 27.7619047619,
"line_max": 70,
"alpha_frac": 0.7168874172,
"autogenerated": false,
"ratio": 3.3065693430656933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5023456760265694,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.basePen import BasePen
from .flatten import InputContour, OutputContour
from pyClipper import PolyClipper # XXX this isn't the real thing
"""
General Suggestions:
- Contours should only be sent here if they actually overlap.
This can be checked easily using contour bounds.
- Only perform operations on closed contours.
- contours must have an on curve point
- some kind of a log
"""
class BooleanOperationManager(object):
    """Runs boolean (clipping) operations on lists of contours and draws the
    result with an output pen, re-curving the flattened clipper output from
    the original input contours.
    """

    def _performOperation(self, operation, contours, outPen):
        # Wrap the incoming contours so curves can be recovered later.
        inputs = [InputContour(contour) for contour in contours]
        # XXX temporary clipper bridge
        clipperInput = [dict(coordinates=c.originalFlat) for c in inputs]
        clipper = PolyClipper.alloc().init()
        flatResults = clipper.execute_operation_withOptions_(
            clipperInput, operation,
            dict(subjectFillType="noneZero", clipFillType="noneZero"))
        # the temporary Clipper wrapper is very, very slow
        # at converting back to Python structures. do it here
        # so that the profiling of this can be isolated.
        flatResults = [[tuple(point) for point in contour] for contour in flatResults]
        # /XXX
        # convert to output contours
        outputs = [OutputContour(contour) for contour in flatResults]
        # First pass: try to match entire input contours to outputs.
        for inputContour in inputs:
            for outputContour in outputs:
                if outputContour.final:
                    break
                if outputContour.reCurveFromEntireInputContour(inputContour):
                    # the input is expired if a match was made,
                    # so stop passing it to the outputs
                    break
        # Second pass: re-curve individual segments.
        for inputContour in inputs:
            # skip contours that were completely used in the previous step
            if inputContour.used:
                continue
            # XXX this could be expensive if an input becomes completely used
            # it doesn't stop from being passed to the output
            for outputContour in outputs:
                outputContour.reCurveFromInputContourSegments(inputContour)
        # Curve fit.
        for outputContour in outputs:
            outputContour.reCurveSubSegments(inputs)
        # Draw the results.
        for outputContour in outputs:
            outputContour.drawPoints(outPen)
        # XXX return?
        return outputs

    def union(self, contours, outPen):
        # XXX return?
        return self._performOperation("union", contours, outPen)
| {
"repo_name": "adrientetar/defcon",
"path": "Lib/defcon/tools/booleanOperations/__init__.py",
"copies": "1",
"size": "2811",
"license": "mit",
"hash": -5434237711397960000,
"line_mean": 40.9552238806,
"line_max": 150,
"alpha_frac": 0.6623977232,
"autogenerated": false,
"ratio": 4.385335413416537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5547733136616537,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.basePen import BasePen
from flatten import InputContour, OutputContour
from pyClipper import PolyClipper # XXX this isn't the real thing
"""
General Suggestions:
- Contours should only be sent here if they actually overlap.
This can be checked easily using contour bounds.
- Only perform operations on closed contours.
- contours must have an on curve point
- some kind of a log
"""
class BooleanOperationManager(object):
    """Performs boolean operations on contour lists via the (temporary)
    PolyClipper bridge, then re-curves and draws the flattened result.
    """

    def _performOperation(self, operation, contours, outPen):
        # Wrap inputs so their curves can be restored after clipping.
        wrappedInputs = [InputContour(contour) for contour in contours]
        # XXX temporary
        clipperPayload = [dict(coordinates=wrapped.originalFlat) for wrapped in wrappedInputs]
        clipper = PolyClipper.alloc().init()
        rawResults = clipper.execute_operation_withOptions_(
            clipperPayload, operation,
            dict(subjectFillType="noneZero", clipFillType="noneZero"))
        # the temporary Clipper wrapper is very, very slow
        # at converting back to Python structures. do it here
        # so that the profiling of this can be isolated.
        rawResults = [[tuple(point) for point in contour] for contour in rawResults]
        # /XXX
        wrappedOutputs = [OutputContour(contour) for contour in rawResults]
        # Pass 1: whole-contour matches.
        for wrappedInput in wrappedInputs:
            for wrappedOutput in wrappedOutputs:
                if wrappedOutput.final:
                    break
                if wrappedOutput.reCurveFromEntireInputContour(wrappedInput):
                    # the input is expired if a match was made,
                    # so stop passing it to the outputs
                    break
        # Pass 2: per-segment re-curving for remaining inputs.
        for wrappedInput in wrappedInputs:
            # skip contours that were completely used in the previous step
            if wrappedInput.used:
                continue
            # XXX this could be expensive if an input becomes completely used
            # it doesn't stop from being passed to the output
            for wrappedOutput in wrappedOutputs:
                wrappedOutput.reCurveFromInputContourSegments(wrappedInput)
        # Curve fit, then draw.
        for wrappedOutput in wrappedOutputs:
            wrappedOutput.reCurveSubSegments(wrappedInputs)
        for wrappedOutput in wrappedOutputs:
            wrappedOutput.drawPoints(outPen)
        # XXX return?
        return wrappedOutputs

    def union(self, contours, outPen):
        # XXX return?
        return self._performOperation("union", contours, outPen)
| {
"repo_name": "metapolator/mutatormathtools",
"path": "python_modules/lib/python/defcon/tools/booleanOperations/__init__.py",
"copies": "1",
"size": "2810",
"license": "apache-2.0",
"hash": 5444176425720641000,
"line_mean": 40.9402985075,
"line_max": 150,
"alpha_frac": 0.662633452,
"autogenerated": false,
"ratio": 4.390625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5553258452000001,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.basePen import BasePen
from flatten import InputContour, OutputContour
import pyClipper
"""
General Suggestions:
- Contours should only be sent here if they actually overlap.
This can be checked easily using contour bounds.
- Only perform operations on closed contours.
- contours must have an on curve point
- some kind of a log
"""
class BooleanOperationManager(object):
    """Perform boolean path operations (union, difference, intersection, xor)
    on contour collections via the pyClipper polygon clipping wrapper.

    Contours are flattened to point sequences (InputContour), clipped, and
    the resulting flat contours (OutputContour) are re-curved by matching
    them back against the original input contours before being drawn.
    """

    def _performOperation(self, operation, subjectContours, clipContours, outPen):
        """Run *operation* ("union", "difference", "intersection" or "xor")
        on subject vs. clip contours and draw the result to *outPen*.

        Returns the list of OutputContour objects for inspection.
        """
        # prep the contours; degenerate contours (fewer than 2 points)
        # are silently dropped
        subjectInputContours = [InputContour(contour) for contour in subjectContours if contour and len(contour) > 1]
        clipInputContours = [InputContour(contour) for contour in clipContours if contour and len(contour) > 1]
        inputContours = subjectInputContours + clipInputContours
        resultContours = pyClipper.clipExecute([subjectInputContour.originalFlat for subjectInputContour in subjectInputContours],
            [clipInputContour.originalFlat for clipInputContour in clipInputContours],
            operation, subjectFillType="noneZero", clipFillType="noneZero")
        # convert to output contours
        outputContours = [OutputContour(contour) for contour in resultContours]
        # re-curve entire contour
        for inputContour in inputContours:
            for outputContour in outputContours:
                if outputContour.final:
                    # this output is already fully re-curved; try the next one
                    continue
                if outputContour.reCurveFromEntireInputContour(inputContour):
                    # the input is expired if a match was made,
                    # so stop passing it to the outputs
                    break
        # re-curve segments
        for inputContour in inputContours:
            # skip contours that were completely used in the previous step
            if inputContour.used:
                continue
            # XXX this could be expensive if an input becomes completely used
            # it doesn't stop from being passed to the output
            for outputContour in outputContours:
                outputContour.reCurveFromInputContourSegments(inputContour)
        # curve fit
        for outputContour in outputContours:
            outputContour.reCurveSubSegments(inputContours)
        # output the results
        for outputContour in outputContours:
            outputContour.drawPoints(outPen)
        return outputContours

    def union(self, contours, outPen):
        # union takes no clip contours: every contour is a subject
        return self._performOperation("union", contours, [], outPen)

    def difference(self, subjectContours, clipContours, outPen):
        return self._performOperation("difference", subjectContours, clipContours, outPen)

    def intersection(self, subjectContours, clipContours, outPen):
        return self._performOperation("intersection", subjectContours, clipContours, outPen)

    def xor(self, subjectContours, clipContours, outPen):
        return self._performOperation("xor", subjectContours, clipContours, outPen)

    def getIntersections(self, contours):
        """Return the points at which *contours* intersect each other.

        Unions all contours, then collects the flat points present in the
        result but not in the input: those were introduced by the clipper
        at intersections. Points are scaled back from clipper integer
        space before being returned.
        """
        from flatten import _scalePoints, inverseClipperScale
        # prep the contours
        inputContours = [InputContour(contour) for contour in contours if contour and len(contour) > 1]
        inputFlatPoints = set()
        for contour in inputContours:
            inputFlatPoints.update(contour.originalFlat)
        resultContours = pyClipper.clipExecute([inputContour.originalFlat for inputContour in inputContours],
            [],
            "union", subjectFillType="noneZero", clipFillType="noneZero")
        resultFlatPoints = set()
        for contour in resultContours:
            resultFlatPoints.update(contour)
        intersections = resultFlatPoints - inputFlatPoints
        return _scalePoints(intersections, inverseClipperScale)
| {
"repo_name": "moyogo/mutatorscale",
"path": "lib/mutatorScale/booleanOperations/booleanOperationManager.py",
"copies": "4",
"size": "3938",
"license": "mit",
"hash": -7595882248059536000,
"line_mean": 43.7613636364,
"line_max": 131,
"alpha_frac": 0.6635347892,
"autogenerated": false,
"ratio": 4.439684329199549,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7103219118399549,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.basePen import BasePen
from fontTools.misc.arrayTools import updateBounds, pointInRect, unionRect
from fontTools.misc.bezierTools import calcCubicBounds, calcQuadraticBounds
__all__ = ["BoundsPen", "ControlBoundsPen"]
class ControlBoundsPen(BasePen):
    """Pen that accumulates the "control bounds" of a shape: the bounding
    box of every point seen, including off-curve control points. Because
    control points may lie outside the outline, this box can be larger
    than the true bounding box.

    When the shape has been drawn, the result is available as the
    'bounds' attribute: an (xMin, yMin, xMax, yMax) tuple.
    """

    def __init__(self, glyphSet):
        BasePen.__init__(self, glyphSet)
        self.bounds = None

    def _grow(self, *points):
        # Fold each given point into the running bounding box.
        box = self.bounds
        for point in points:
            box = updateBounds(box, point)
        self.bounds = box

    def _moveTo(self, pt):
        if self.bounds:
            self._grow(pt)
        else:
            # first point seen: start a zero-size box on it
            x, y = pt
            self.bounds = (x, y, x, y)

    def _lineTo(self, pt):
        self._grow(pt)

    def _curveToOne(self, bcp1, bcp2, pt):
        self._grow(bcp1, bcp2, pt)

    def _qCurveToOne(self, bcp, pt):
        self._grow(bcp, pt)
class BoundsPen(ControlBoundsPen):
    """Pen that computes the true bounding box of a shape, taking curve
    extrema into account even when no on-curve point lies on them.
    Somewhat slower to compute than the "control bounds".

    When the shape has been drawn, the result is available as the
    'bounds' attribute: an (xMin, yMin, xMax, yMax) tuple.
    """

    def _curveToOne(self, bcp1, bcp2, pt):
        box = updateBounds(self.bounds, pt)
        # Only run the exact (slower) extrema computation when a control
        # point escapes the box accumulated so far.
        if not (pointInRect(bcp1, box) and pointInRect(bcp2, box)):
            curveBox = calcCubicBounds(
                self._getCurrentPoint(), bcp1, bcp2, pt)
            box = unionRect(box, curveBox)
        self.bounds = box

    def _qCurveToOne(self, bcp, pt):
        box = updateBounds(self.bounds, pt)
        if not pointInRect(bcp, box):
            curveBox = calcQuadraticBounds(
                self._getCurrentPoint(), bcp, pt)
            box = unionRect(box, curveBox)
        self.bounds = box
if __name__ == "__main__":
    def draw(pen):
        # Draw a small closed test shape exercising line, quadratic
        # and cubic segments.
        pen.moveTo((0, 0))
        pen.lineTo((0, 100))
        pen.qCurveTo((50, 75), (60, 50), (50, 25), (0, 0))
        pen.curveTo((-50, 25), (-60, 50), (-50, 75), (0, 100))
        pen.closePath()

    # Compare the loose control bounds with the exact bounds.
    # print() with a single argument works on both Python 2 and 3;
    # the original "print pen.bounds" statement was Python-2-only.
    pen = ControlBoundsPen(None)
    draw(pen)
    print(pen.bounds)
    pen = BoundsPen(None)
    draw(pen)
    print(pen.bounds)
| {
"repo_name": "shadowmint/nwidget",
"path": "lib/fonttools-2.3/Lib/fontTools/pens/boundsPen.py",
"copies": "2",
"size": "2576",
"license": "apache-2.0",
"hash": 6678536003582683000,
"line_mean": 26.6989247312,
"line_max": 75,
"alpha_frac": 0.700310559,
"autogenerated": false,
"ratio": 2.9814814814814814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9536258851750081,
"avg_score": 0.02910663774628004,
"num_lines": 93
} |
from fontTools.pens.basePen import BasePen
from functools import partial
from itertools import count
import sympy as sp
import sys
n = 3 # Max Bezier degree; 3 for cubic, 2 for quadratic

# Scalar symbols: curve parameter t and plane coordinates x, y.
t, x, y = sp.symbols('t x y', real=True)
c = sp.symbols('c', real=False) # Complex representation instead of x/y

# Per-control-point symbols for degrees 0..n:
# X/Y as separate coordinates, P as (x, y) pairs, C as complex points.
X = tuple(sp.symbols('x:%d'%(n+1), real=True))
Y = tuple(sp.symbols('y:%d'%(n+1), real=True))
P = tuple(zip(*(sp.symbols('p:%d[%s]'%(n+1,w), real=True) for w in '01')))
C = tuple(sp.symbols('c:%d'%(n+1), real=False))

# Bernstein basis functions, one row per degree 0..n.
# Pascal's triangle, padded with a trailing 0 per row during construction.
BinomialCoefficient = [(1, 0)]
for i in range(1, n+1):
    last = BinomialCoefficient[-1]
    this = tuple(last[j-1]+last[j] for j in range(len(last)))+(0,)
    BinomialCoefficient.append(this)
BinomialCoefficient = tuple(tuple(item[:-1]) for item in BinomialCoefficient)
del last, this

# BernsteinPolynomial[d][i] = C(d, i) * t**i * (1-t)**(d-i)
# (the generator variables 'n' below shadow the module-level n on purpose)
BernsteinPolynomial = tuple(
    tuple(c * t**i * (1-t)**(n-i) for i,c in enumerate(coeffs))
    for n,coeffs in enumerate(BinomialCoefficient))

# Symbolic Bezier curves of each degree, as (x(t), y(t)) pairs over P...
BezierCurve = tuple(
    tuple(sum(P[i][j]*bernstein for i,bernstein in enumerate(bernsteins))
          for j in range(2))
    for n,bernsteins in enumerate(BernsteinPolynomial))
# ...and as a single complex-valued expression over C.
BezierCurveC = tuple(
    sum(C[i]*bernstein for i,bernstein in enumerate(bernsteins))
    for n,bernsteins in enumerate(BernsteinPolynomial))
def green(f, curveXY):
    """Integrate the scalar field *f* over the region swept by the Bezier
    curve *curveXY* (an (x(t), y(t)) expression pair), using Green's
    theorem to turn the area integral into a line integral along the
    curve. Returns a sympy expression in the curve's control symbols.
    """
    # antiderivative of f with respect to y (sign fixes orientation)
    f = -sp.integrate(sp.sympify(f), y)
    # evaluate along the parametric curve ...
    f = f.subs({x:curveXY[0], y:curveXY[1]})
    # ... and integrate against dx = x'(t) dt over the full parameter range
    f = sp.integrate(f * sp.diff(curveXY[0], t), (t, 0, 1))
    return f
class _BezierFuncsLazy(dict):
    """Dict mapping Bezier degree -> compiled Green-integral function for a
    fixed symbolic integrand, with entries computed lazily on first access.
    """

    def __init__(self, symfunc):
        # the symbolic integrand; the dict itself is the cache
        # (the old, never-read self._bezfuncs attribute was removed)
        self._symfunc = symfunc

    def __missing__(self, i):
        # Build the Green-theorem line integral for a degree-i Bezier,
        # simplify, and compile it to a plain Python function.
        args = ['p%d'%d for d in range(i+1)]
        f = green(self._symfunc, BezierCurve[i])
        f = sp.gcd_terms(f.collect(sum(P,()))) # Optimize
        func = sp.lambdify(args, f)
        # dict.__missing__ does not store its return value, so cache the
        # compiled function explicitly: the symbolic integration above is
        # expensive and would otherwise rerun on every single lookup.
        self[i] = func
        return func
class GreenPen(BasePen):
    """Pen that accumulates a Green-theorem line integral of a symbolic
    function over the outline it draws; the running total is available
    as the 'value' attribute.
    """

    # Shared cache: str(integrand) -> lazy degree->function table.
    _BezierFuncs = {}

    @classmethod
    def _getGreenBezierFuncs(cls, func):
        key = str(func)
        try:
            return cls._BezierFuncs[key]
        except KeyError:
            table = cls._BezierFuncs[key] = _BezierFuncsLazy(func)
            return table

    def __init__(self, func, glyphset=None):
        BasePen.__init__(self, glyphset)
        self._funcs = self._getGreenBezierFuncs(func)
        self.value = 0

    def _moveTo(self, p0):
        self.__startPoint = p0

    def _closePath(self):
        # close the contour with an implicit straight segment if needed
        if self._getCurrentPoint() != self.__startPoint:
            self._lineTo(self.__startPoint)

    def _endPath(self):
        if self._getCurrentPoint() != self.__startPoint:
            # Green theorem is not defined on open contours.
            raise NotImplementedError

    def _lineTo(self, p1):
        self.value += self._funcs[1](self._getCurrentPoint(), p1)

    def _qCurveToOne(self, p1, p2):
        self.value += self._funcs[2](self._getCurrentPoint(), p1, p2)

    def _curveToOne(self, p1, p2, p3):
        self.value += self._funcs[3](self._getCurrentPoint(), p1, p2, p3)
# Sample pens.
# Do not use this in real code.
# Use fontTools.pens.momentsPen.MomentsPen instead.
# Each pen integrates the given polynomial over the drawn outline:
# 1 gives the signed area, x/y the first moments, x*x/y*y/x*y the
# second moments.
AreaPen = partial(GreenPen, func=1)
MomentXPen = partial(GreenPen, func=x)
MomentYPen = partial(GreenPen, func=y)
MomentXXPen = partial(GreenPen, func=x*x)
MomentYYPen = partial(GreenPen, func=y*y)
MomentXYPen = partial(GreenPen, func=x*y)
def printGreenPen(penName, funcs, file=sys.stdout):
    """Generate the source code of a standalone pen class named *penName*.

    funcs is a sequence of (attributeName, symbolicIntegrand) pairs; the
    emitted class accumulates the corresponding Green-theorem integral
    into self.<attributeName> for each pair. The generated module text is
    written to *file*. The emitted code's own layout comes from the string
    literals below, which must be reproduced exactly.
    """
    # class header and constructor
    print(
'''from fontTools.pens.basePen import BasePen
class %s(BasePen):
def __init__(self, glyphset=None):
BasePen.__init__(self, glyphset)
'''%penName, file=file)
    for name,f in funcs:
        print(' self.%s = 0' % name, file=file)
    # boilerplate shared by every generated pen
    print('''
def _moveTo(self, p0):
self.__startPoint = p0
def _closePath(self):
p0 = self._getCurrentPoint()
if p0 != self.__startPoint:
self._lineTo(self.__startPoint)
def _endPath(self):
p0 = self._getCurrentPoint()
if p0 != self.__startPoint:
# Green theorem is not defined on open contours.
raise NotImplementedError
''', end='', file=file)
    # one drawing callback per Bezier degree: line, quadratic, cubic
    for n in (1, 2, 3):
        if n == 1:
            print('''
def _lineTo(self, p1):
x0,y0 = self._getCurrentPoint()
x1,y1 = p1
''', file=file)
        elif n == 2:
            print('''
def _qCurveToOne(self, p1, p2):
x0,y0 = self._getCurrentPoint()
x1,y1 = p1
x2,y2 = p2
''', file=file)
        elif n == 3:
            print('''
def _curveToOne(self, p1, p2, p3):
x0,y0 = self._getCurrentPoint()
x1,y1 = p1
x2,y2 = p2
x3,y3 = p3
''', file=file)
        # integrate each requested function over a degree-n Bezier, then
        # rewrite the P[i] pair symbols as the x0..xn / y0..yn locals that
        # the generated callbacks unpack above
        subs = {P[i][j]: [X, Y][j][i] for i in range(n+1) for j in range(2)}
        greens = [green(f, BezierCurve[n]) for name,f in funcs]
        greens = [sp.gcd_terms(f.collect(sum(P,()))) for f in greens] # Optimize
        greens = [f.subs(subs) for f in greens] # Convert to p to x/y
        # common-subexpression elimination: shared temporaries r0, r1, ...
        defs, exprs = sp.cse(greens,
            optimizations='basic',
            symbols=(sp.Symbol('r%d'%i) for i in count()))
        for name,value in defs:
            print(' %s = %s' % (name, value), file=file)
        print(file=file)
        for name,value in zip([f[0] for f in funcs], exprs):
            print(' self.%s += %s' % (name, value), file=file)
    # trailing self-regeneration stub for the generated module
    print('''
if __name__ == '__main__':
from fontTools.misc.symfont import x, y, printGreenPen
printGreenPen('%s', ['''%penName, file=file)
    for name,f in funcs:
        print(" ('%s', %s)," % (name, str(f)), file=file)
    print(' ])', file=file)
if __name__ == '__main__':
    # Smoke test: accumulate the signed area of a small closed contour
    # mixing line and cubic segments.
    pen = AreaPen()
    pen.moveTo((100,100))
    pen.lineTo((100,200))
    pen.lineTo((200,200))
    pen.curveTo((200,250),(300,300),(250,350))
    pen.lineTo((200,100))
    pen.closePath()
    print(pen.value)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/misc/symfont.py",
"copies": "5",
"size": "5283",
"license": "apache-2.0",
"hash": -7949379261195984000,
"line_mean": 26.515625,
"line_max": 77,
"alpha_frac": 0.6520916146,
"autogenerated": false,
"ratio": 2.5399038461538463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5691995460753846,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.basePen import BasePen
from reportlab.graphics.shapes import Path
__all__ = ["ReportLabPen"]
class ReportLabPen(BasePen):
    """Pen that renders a glyph outline into a
    reportlab.graphics.shapes.Path object, available as the 'path'
    attribute.
    """

    def __init__(self, glyphSet, path=None):
        BasePen.__init__(self, glyphSet)
        # draw into the caller-supplied Path, or start a fresh one
        self.path = Path() if path is None else path

    def _moveTo(self, p):
        x, y = p
        self.path.moveTo(x, y)

    def _lineTo(self, p):
        x, y = p
        self.path.lineTo(x, y)

    def _curveToOne(self, p1, p2, p3):
        x1, y1 = p1
        x2, y2 = p2
        x3, y3 = p3
        self.path.curveTo(x1, y1, x2, y2, x3, y3)

    def _closePath(self):
        self.path.closePath()
if __name__=="__main__":
    # Command-line demo: render one glyph of a font to a PNG via reportlab.
    import sys
    if len(sys.argv) < 3:
        # not enough arguments: print usage and bail out
        print("Usage: reportLabPen.py <OTF/TTF font> <glyphname> [<image file to create>]")
        print(" If no image file name is created, by default <glyphname>.png is created.")
        print(" example: reportLabPen.py Arial.TTF R test.png")
        print(" (The file format will be PNG, regardless of the image file name supplied)")
        sys.exit(0)
    from fontTools.ttLib import TTFont
    from reportlab.lib import colors
    path = sys.argv[1]
    glyphName = sys.argv[2]
    if (len(sys.argv) > 3):
        imageFile = sys.argv[3]
    else:
        imageFile = "%s.png" % glyphName
    font = TTFont(path) # it would work just as well with fontTools.t1Lib.T1Font
    gs = font.getGlyphSet()
    # trace the glyph into a red reportlab Path
    pen = ReportLabPen(gs, Path(fillColor=colors.red, strokeWidth=5))
    g = gs[glyphName]
    g.draw(pen)
    w, h = g.width, 1000
    from reportlab.graphics import renderPM
    from reportlab.graphics.shapes import Group, Drawing, scale
    # Everything is wrapped in a group to allow transformations.
    g = Group(pen.path)
    g.translate(0, 200)
    g.scale(0.3, 0.3)
    d = Drawing(w, h)
    d.add(g)
    renderPM.drawToFile(d, imageFile, fmt="PNG")
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/pens/reportLabPen.py",
"copies": "5",
"size": "1779",
"license": "apache-2.0",
"hash": -3562843458421698000,
"line_mean": 23.3698630137,
"line_max": 86,
"alpha_frac": 0.6756604834,
"autogenerated": false,
"ratio": 2.6161764705882353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03694073500507296,
"num_lines": 73
} |
from fontTools.pens.basePen import BasePen
from robofab.misc.arrayTools import updateBounds, pointInRect, unionRect
from robofab.misc.bezierTools import calcCubicBounds, calcQuadraticBounds
__all__ = ["BoundsPen", "ControlBoundsPen"]
class ControlBoundsPen(BasePen):
    """Pen computing the "control bounds" of a shape: the bounding box of
    all control points on closed paths, which may be larger than the true
    bounding box when curve extremes have no on-curve point.

    Single points (anchors) are ignored: a moveTo only enters the bounds
    once an actual segment follows it.

    When the shape has been drawn, the result is available as the
    'bounds' attribute: an (xMin, yMin, xMax, yMax) tuple.

    This replaces fontTools/pens/boundsPen (temporarily?); unlike the
    fontTools pen, loose anchor points are not taken into account.
    """

    def __init__(self, glyphSet):
        BasePen.__init__(self, glyphSet)
        self.bounds = None
        self._start = None

    def _moveTo(self, pt):
        # defer: a bare moveTo (anchor) must not affect the bounds
        self._start = pt

    def _addMoveTo(self):
        # commit the deferred start point, if there is one
        start = self._start
        if start is None:
            return
        self._start = None
        if self.bounds:
            self.bounds = updateBounds(self.bounds, start)
        else:
            x, y = start
            self.bounds = (x, y, x, y)

    def _lineTo(self, pt):
        self._addMoveTo()
        self.bounds = updateBounds(self.bounds, pt)

    def _curveToOne(self, bcp1, bcp2, pt):
        self._addMoveTo()
        box = self.bounds
        for point in (bcp1, bcp2, pt):
            box = updateBounds(box, point)
        self.bounds = box

    def _qCurveToOne(self, bcp, pt):
        self._addMoveTo()
        box = self.bounds
        for point in (bcp, pt):
            box = updateBounds(box, point)
        self.bounds = box
class BoundsPen(ControlBoundsPen):
    """Pen computing the exact bounds of a shape, correct even for curves
    whose extremes carry no on-curve point. Somewhat slower to compute
    than the "control bounds".

    When the shape has been drawn, the result is available as the
    'bounds' attribute: an (xMin, yMin, xMax, yMax) tuple.
    """

    def _curveToOne(self, bcp1, bcp2, pt):
        self._addMoveTo()
        box = updateBounds(self.bounds, pt)
        # solve for the curve's true extrema only when a control point
        # falls outside the box accumulated so far
        if not (pointInRect(bcp1, box) and pointInRect(bcp2, box)):
            box = unionRect(box, calcCubicBounds(
                self._getCurrentPoint(), bcp1, bcp2, pt))
        self.bounds = box

    def _qCurveToOne(self, bcp, pt):
        self._addMoveTo()
        box = updateBounds(self.bounds, pt)
        if not pointInRect(bcp, box):
            box = unionRect(box, calcQuadraticBounds(
                self._getCurrentPoint(), bcp, pt))
        self.bounds = box
| {
"repo_name": "anthrotype/robofab",
"path": "Lib/robofab/pens/boundsPen.py",
"copies": "9",
"size": "2689",
"license": "bsd-3-clause",
"hash": -3050847589876032000,
"line_mean": 27.3052631579,
"line_max": 82,
"alpha_frac": 0.7147638527,
"autogenerated": false,
"ratio": 3.1122685185185186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.06774892362206845,
"num_lines": 95
} |
from fontTools.pens.basePen import BasePen
from robofab.objects.objectsRF import RFont, RGlyph
from drawBot import *
class DrawBotPen(BasePen):
    """
    A pen that draws a glyph into a drawbot.
    I don't think this can deal with components or anything so be careful.

    Note: the callbacks use Python-2-only tuple parameter unpacking.
    """
    # Each callback forwards straight to the module-level drawbot calls
    # (imported via `from drawBot import *`).
    def _moveTo(self, (x, y)):
        moveTo((x, y))
    def _lineTo(self, (x, y)):
        lineTo((x, y))
    def _curveToOne(self, (x1, y1), (x2, y2), (x3, y3)):
        curveTo((x1, y1), (x2, y2), (x3, y3))
    # NOTE(review): this overrides BasePen's public closePath() rather than
    # the _closePath() hook -- confirm that is intentional.
    def closePath(self):
        closePath()
def drawGlyph(glyph):
    """
    Mimics the in-RoboFont drawbot, but using DrawBotPen.
    """
    # start a fresh drawbot path, trace the glyph into it via the pen
    # (glyph.getParent() supplies the glyph set BasePen expects),
    # then render the accumulated path
    newPath()
    pen = DrawBotPen(glyph.getParent())
    glyph.draw(pen)
    drawPath()
def _getKernGroups(groups):
leftKernGroups = {}
rightKernGroups = {}
leftKernNames = ['@public.kern1', '@MMK_L_']
rightKernNames = ['@public.kern2', '@MMK_R_']
for groupName, groupGlyphs in groups.items():
for name in leftKernNames:
if name in groupName:
leftKernGroups[groupName] = groupGlyphs
for name in rightKernNames:
if name in groupName:
rightKernGroups[groupName] = groupGlyphs
return leftKernGroups, rightKernGroups
def _getGlyphNamesFromTextString(textString, f, showMissing='.notdef'):
    """Map each character of *textString* to a glyph name in font *f*.

    Characters without a cmap entry are replaced by *showMissing* (if
    that glyph exists in the font); pass a falsy *showMissing* to drop
    them instead.
    """
    gnames = []
    m = f.getCharacterMapping()
    for char in textString:
        gname = m.get(ord(char))
        if gname:
            # the mapping yields a list of names; take the first
            gnames.append(gname[0])
        elif showMissing:
            # NOTE(review): has_key is Python-2-only and relies on the
            # robofab font object providing it -- confirm before porting.
            if f.has_key(showMissing):
                gnames.append(showMissing)
    return gnames
def ufoText(textString, pos, font, fontSize, showMissing='.notdef', kerning=True, draw=True):
    """
    A function that uses drawGlyph() to draw a string from a robofab object.
    It acts like DrawBot's text() function, but you pass the font and fontSize directly.
    It does not handle any advanced text layout or features. For that you
    should generate a font and use compositor.
    ufoText(u'Your text here.', (0, 0), font=f, fontSize=50, kerning=False)

    Returns (totalWidth, fontSize); with draw=False nothing is drawn and
    only the measurement is computed.
    """
    # get glyph names
    gnames = _getGlyphNamesFromTextString(textString, font, showMissing)
    # before we begin, get kerning
    # there is probably a better way to do this
    # but for now we will explode kerning once so we don't have to
    if kerning:
        explodedKerning = font.kerning.copy()
        # fixed: was `f.groups` -- `f` is not defined in this scope,
        # the parameter is named `font`
        explodedKerning.explodeClasses(*_getKernGroups(font.groups))
    save()
    # move to the position
    if draw: translate(*pos)
    save()
    # drawGlyph draws at 1 pt = 1 font unit.
    scaleFactor = fontSize / font.info.unitsPerEm
    if draw: scale(scaleFactor)
    totalWidth = 0
    # loop through glyphs and draw them
    previousGname = None
    for gname in gnames:
        if kerning:
            kern = explodedKerning.get((previousGname, gname)) or 0
            # fixed: only move the canvas when actually drawing (every other
            # canvas op here is guarded by `draw`); the width is
            # accumulated either way
            if draw: translate(kern, 0)
            totalWidth += kern
        if draw: drawGlyph(font[gname])
        if draw: translate(font[gname].width, 0)
        totalWidth += font[gname].width
        previousGname = gname
    restore()
    restore()
    return totalWidth, fontSize
def ufoTextSize(textString, font, fontSize, kerning=True):
    """
    Just like textSize, but using a UFO.
    This is pretty dang inefficient, but such is life!

    Returns (totalWidth, fontSize) without drawing anything.
    """
    # fixed: ufoText's signature is (textString, pos, font, fontSize, ...).
    # The original call omitted the pos argument (shifting font into pos
    # and fontSize into font) and dropped the kerning flag.
    return ufoText(textString, (0, 0), font, fontSize, kerning=kerning, draw=False)
"repo_name": "davelab6/drawbotlab",
"path": "glyph.py",
"copies": "1",
"size": "3424",
"license": "mit",
"hash": -4122746093864195600,
"line_mean": 31.619047619,
"line_max": 93,
"alpha_frac": 0.629088785,
"autogenerated": false,
"ratio": 3.515400410677618,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4644489195677618,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.basePen import BasePen
from robofab.pens.pointPen import AbstractPointPen
def calcArea(points):
    """Return the signed area of the polygon described by *points* (a
    sequence of (x, y) pairs), via the shoelace formula. The sign encodes
    the winding direction of the point order.
    """
    area = 0
    count = len(points)
    # `range` instead of the Python-2-only `xrange`: identical results on
    # Python 2 and also works on Python 3.
    for i in range(count):
        x1, y1 = points[i]
        # wrap around to close the polygon
        x2, y2 = points[(i+1) % count]
        area += (x1*y2)-(x2*y1)
    return area / 2
class FilterPointPen(AbstractPointPen):
    """Point pen that records a glyph's contours and components, discarding
    any contour whose absolute signed area is below 25 square units
    (degenerate speck contours). Replay the kept outline into another
    point pen with extract().
    """

    def __init__(self, glyphSet=None):
        self.glyphSet = glyphSet
        self.contours = []
        self.components = []

    def beginPath(self):
        # start collecting points for a new contour
        self.currentContour = []

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, *args, **kwargs):
        self.currentContour.append({
            'pt': pt,
            'segmentType': segmentType,
            'smooth': smooth,
            'name': name,
        })

    def endPath(self):
        # keep the contour only if it encloses a non-negligible area
        outline = [entry['pt'] for entry in self.currentContour]
        if abs(calcArea(outline)) >= 25:
            self.contours.append(self.currentContour)

    def addComponent(self, baseGlyphName, transformation):
        self.components.append((baseGlyphName, transformation))

    def extract(self, pointPen):
        """Replay recorded components and surviving contours into *pointPen*."""
        for baseGlyphName, transformation in self.components:
            pointPen.addComponent(baseGlyphName, transformation)
        for contour in self.contours:
            pointPen.beginPath()
            for entry in contour:
                pointPen.addPoint(**entry)
            pointPen.endPath()
class CollectComponentsPen(BasePen):
    # Pen that records only a glyph's components; every outline-drawing
    # callback is ignored.
    # NOTE(review): BasePen.__init__ is never called here, so the usual
    # glyphSet setup is skipped -- confirm that is intentional.
    def __init__(self):
        self.components = []
    def ignore(self, *args):
        # shared no-op for all outline callbacks
        pass
    _moveTo = _lineTo = _curveToOne = endPath = closePath = ignore
    def addComponent(self, baseGlyphName, transformation):
        self.components.append((baseGlyphName, transformation))
    def get(self):
        # return the collected (baseGlyphName, transformation) pairs
        return self.components
class CounterPen(BasePen):
    # Pen that counts the segments drawn into it (one per moveTo/lineTo/
    # curve); components and path ends are ignored.
    # NOTE(review): BasePen.__init__ is never called here, so no glyphSet
    # is set up -- confirm this pen is only used where that is safe.
    def __init__(self):
        self.pointCount = 0
    def _moveTo(self, pt):
        self.pointCount += 1
    def _lineTo(self, pt):
        self.pointCount += 1
    def _curveToOne(self, pt1, pt2, pt3):
        # a whole curve segment counts as one point
        self.pointCount += 1
    def endPath(self):
        pass
    def closePath(self):
        pass
    def getPointCount(self):
        # total number of segments seen so far
        return self.pointCount
    def addComponent(self, baseGlyphName, transformation):
        pass
| {
"repo_name": "loicsander/Robofont-scripts",
"path": "PenBallWizard/PenBallWizard.roboFontExt/lib/penBallWizard/objects/penUtils.py",
"copies": "2",
"size": "2292",
"license": "mit",
"hash": -7805557958314122000,
"line_mean": 23.6451612903,
"line_max": 87,
"alpha_frac": 0.6042757417,
"autogenerated": false,
"ratio": 3.8456375838926173,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002432099743927701,
"num_lines": 93
} |
from fontTools.pens.basePen import BasePen
from robofab.pens.pointPen import AbstractPointPen
def calcArea(points):
    """Return the signed area of the polygon described by *points* (a
    sequence of (x, y) pairs), via the shoelace formula. The sign encodes
    the winding direction of the point order.
    """
    area = 0
    count = len(points)
    # `range` instead of the Python-2-only `xrange`: identical results on
    # Python 2 and also works on Python 3.
    for i in range(count):
        x1, y1 = points[i]
        # wrap around to close the polygon
        x2, y2 = points[(i+1) % count]
        area += (x1*y2)-(x2*y1)
    return area / 2
def overlap(pt1, pt2, threshold=2):
    """Return True if *pt1* and *pt2* lie within *threshold* units of each
    other on both axes (an exclusive square neighborhood, not a radius).

    Rewritten without the Python-2-only tuple-parameter syntax so the
    function also works on Python 3; call sites are unchanged since the
    points were only ever passed positionally.
    """
    (x1, y1), (x2, y2) = pt1, pt2
    return -threshold < x2 - x1 < threshold and -threshold < y2 - y1 < threshold
class CleanPointPen(AbstractPointPen):
    """
    Draws a glyph and filters out points and/or contours using the
    threshold values *areaThreshold* and *overlapThreshold*.
    *areaThreshold* is the minimal absolute area a contour must enclose
    to be passed through (smaller contours are dropped).
    *overlapThreshold* is the per-axis distance under which two on-curve
    points are considered overlapping (used when *removeOverlappingPoints*
    is True).
    """
    def __init__(self, otherPen, areaThreshold=1000, overlapThreshold=2, removeOverlappingPoints=False):
        # otherPen: segment pen that receives the filtered outline
        self.otherPen = otherPen
        self.areaThreshold = areaThreshold
        self.overlapThreshold = overlapThreshold
        self.removeOverlappingPoints = removeOverlappingPoints
        self.contours = []
        self.components = []
    def beginPath(self):
        self.currentContour = []
    def addPoint(self, pt, segmentType=None, smooth=False, name=None, *args, **kwargs):
        previousPoint = self.currentContour[-1] if len(self.currentContour) else None
        point = {
            'pt': pt,
            'segmentType': segmentType,
            'smooth': smooth,
            'name': name
        }
        # Keep the point unless overlap removal is on and it is an on-curve
        # point too close to the previous on-curve point. Off-curve points
        # (segmentType None) and the first point are always kept.
        # NOTE(review): when removal is on, an on-curve point directly
        # following an off-curve point never satisfies the third clause and
        # is dropped -- looks unintended; confirm.
        if (previousPoint is None) or \
            (previousPoint is not None and point['segmentType'] is None) or \
            (previousPoint is not None and self.removeOverlappingPoints == True and (point['segmentType'] is not None and previousPoint['segmentType'] is not None and not overlap(point['pt'], previousPoint['pt'], self.overlapThreshold))) or\
            self.removeOverlappingPoints == False:
            self.currentContour.append(point)
    def endPath(self):
        self._flushContour()
    def addComponent(self, baseGlyphName, transformation):
        # components pass straight through to the receiving pen
        self.otherPen.addComponent(baseGlyphName, transformation)
    def _flushContour(self):
        # drop the whole contour if its on-curve outline area is too small
        area = calcArea([point['pt'] for point in self.currentContour if point['segmentType'] is not None])
        if abs(area) >= self.areaThreshold:
            self.draw(self.currentContour)
    def draw(self, contour):
        """Replay *contour* (a list of point dicts) into the wrapped segment pen."""
        pen = self.otherPen
        # a contour starting with 'move' is an open path
        close = not contour[0]['segmentType'] == 'move'
        pen.moveTo(contour[0]['pt'])
        for i, point in enumerate(contour[1:]):
            i += 1
            if point['segmentType'] == 'line':
                pen.lineTo(point['pt'])
            elif point['segmentType'] == 'curve':
                # the two preceding points are the off-curve controls
                c1, c2 = contour[i-2]['pt'], contour[i-1]['pt']
                pen.curveTo(c1, c2, point['pt'])
        if close == True:
            pen.closePath()
        elif close == False:
            pen.endPath()
"repo_name": "loicsander/RobofabPens",
"path": "cleanPointPen.py",
"copies": "1",
"size": "2967",
"license": "mit",
"hash": -2075023807430832000,
"line_mean": 32.3483146067,
"line_max": 240,
"alpha_frac": 0.6204920795,
"autogenerated": false,
"ratio": 3.909090909090909,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5029582988590909,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.basePen import BasePen
def roundInt(v):
return int(round(v))
def roundIntPoint(xxx_todo_changeme):
(x, y) = xxx_todo_changeme
return roundInt(x), roundInt(y)
class RelativeCoordinatePen(BasePen):
def __init__(self, glyphSet):
BasePen.__init__(self, glyphSet)
self._lastX = None
self._lastY = None
self._heldAbsoluteMove = None
def _makePointRelative(self, pt):
absX, absY = pt
absX = absX
absY = absY
# no points have been added
# so no conversion is needed
if self._lastX is None:
relX, relY = absX, absY
# otherwise calculate the relative coordinates
else:
relX = absX - self._lastX
relY = absY - self._lastY
# store the absolute coordinates
self._lastX = absX
self._lastY = absY
# now return the relative coordinates
return relX, relY
def _moveTo(self, pt):
self._heldAbsoluteMove = pt
def _releaseHeldMove(self):
if self._heldAbsoluteMove is not None:
pt = self._makePointRelative(self._heldAbsoluteMove)
self._relativeMoveTo(pt)
self._heldAbsoluteMove = None
def _relativeMoveTo(self, pt):
raise NotImplementedError
def _lineTo(self, pt):
self._releaseHeldMove()
pt = self._makePointRelative(pt)
self._relativeLineTo(pt)
def _relativeLineTo(self, pt):
raise NotImplementedError
def _curveToOne(self, pt1, pt2, pt3):
self._releaseHeldMove()
pt1 = self._makePointRelative(pt1)
pt2 = self._makePointRelative(pt2)
pt3 = self._makePointRelative(pt3)
self._relativeCurveToOne(pt1, pt2, pt3)
def _relativeCurveToOne(self, pt1, pt2, pt3):
raise NotImplementedError | {
"repo_name": "adrientetar/ufo2fdk",
"path": "Lib/ufo2fdk/pens/__init__.py",
"copies": "1",
"size": "1862",
"license": "mit",
"hash": 7152472041458540000,
"line_mean": 27.6615384615,
"line_max": 64,
"alpha_frac": 0.6068743287,
"autogenerated": false,
"ratio": 3.807770961145194,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4914645289845194,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.basePen import BasePen
def roundInt(v):
return int(round(v))
def roundIntPoint((x, y)):
return roundInt(x), roundInt(y)
class RelativeCoordinatePen(BasePen):
def __init__(self, glyphSet):
BasePen.__init__(self, glyphSet)
self._lastX = None
self._lastY = None
self._heldAbsoluteMove = None
def _makePointRelative(self, pt):
absX, absY = pt
absX = absX
absY = absY
# no points have been added
# so no conversion is needed
if self._lastX is None:
relX, relY = absX, absY
# otherwise calculate the relative coordinates
else:
relX = absX - self._lastX
relY = absY - self._lastY
# store the absolute coordinates
self._lastX = absX
self._lastY = absY
# now return the relative coordinates
return relX, relY
def _moveTo(self, pt):
self._heldAbsoluteMove = pt
def _releaseHeldMove(self):
if self._heldAbsoluteMove is not None:
pt = self._makePointRelative(self._heldAbsoluteMove)
self._relativeMoveTo(pt)
self._heldAbsoluteMove = None
def _relativeMoveTo(self, pt):
raise NotImplementedError
def _lineTo(self, pt):
self._releaseHeldMove()
pt = self._makePointRelative(pt)
self._relativeLineTo(pt)
def _relativeLineTo(self, pt):
raise NotImplementedError
def _curveToOne(self, pt1, pt2, pt3):
self._releaseHeldMove()
pt1 = self._makePointRelative(pt1)
pt2 = self._makePointRelative(pt2)
pt3 = self._makePointRelative(pt3)
self._relativeCurveToOne(pt1, pt2, pt3)
def _relativeCurveToOne(self, pt1, pt2, pt3):
raise NotImplementedError
| {
"repo_name": "anthrotype/ufo2fdk",
"path": "Lib/ufo2fdk/pens/__init__.py",
"copies": "3",
"size": "1821",
"license": "mit",
"hash": -9026704940833186000,
"line_mean": 27.453125,
"line_max": 64,
"alpha_frac": 0.6040637013,
"autogenerated": false,
"ratio": 3.8417721518987342,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007747821350762528,
"num_lines": 64
} |
from fontTools.pens.basePen import BasePen
from Quartz.CoreGraphics import CGPathCreateMutable, CGPathMoveToPoint
from Quartz.CoreGraphics import CGPathAddLineToPoint, CGPathAddCurveToPoint
from Quartz.CoreGraphics import CGPathAddQuadCurveToPoint, CGPathCloseSubpath
__all__ = ["QuartzPen"]
class QuartzPen(BasePen):
"""A pen that creates a CGPath
Parameters
- path: an optional CGPath to add to
- xform: an optional CGAffineTransform to apply to the path
"""
def __init__(self, glyphSet, path=None, xform=None):
BasePen.__init__(self, glyphSet)
if path is None:
path = CGPathCreateMutable()
self.path = path
self.xform = xform
def _moveTo(self, pt):
x, y = pt
CGPathMoveToPoint(self.path, self.xform, x, y)
def _lineTo(self, pt):
x, y = pt
CGPathAddLineToPoint(self.path, self.xform, x, y)
def _curveToOne(self, p1, p2, p3):
(x1, y1), (x2, y2), (x3, y3) = p1, p2, p3
CGPathAddCurveToPoint(self.path, self.xform, x1, y1, x2, y2, x3, y3)
def _qCurveToOne(self, p1, p2):
(x1, y1), (x2, y2) = p1, p2
CGPathAddQuadCurveToPoint(self.path, self.xform, x1, y1, x2, y2)
def _closePath(self):
CGPathCloseSubpath(self.path)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/pens/quartzPen.py",
"copies": "5",
"size": "1175",
"license": "apache-2.0",
"hash": -9118575459885220000,
"line_mean": 25.1111111111,
"line_max": 77,
"alpha_frac": 0.7072340426,
"autogenerated": false,
"ratio": 2.6345291479820627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5841763190582063,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.basePen import BasePen
class ReportLabPen(BasePen):
"""A pen for drawing onto a reportlab.graphics.shapes.Path object."""
def __init__(self, glyphSet, path=None):
BasePen.__init__(self, glyphSet)
if path is None:
from reportlab.graphics.shapes import Path
path = Path()
self.path = path
def _moveTo(self, (x,y)):
self.path.moveTo(x,y)
def _lineTo(self, (x,y)):
self.path.lineTo(x,y)
def _curveToOne(self, (x1,y1), (x2,y2), (x3,y3)):
self.path.curveTo(x1, y1, x2, y2, x3, y3)
def _closePath(self):
self.path.closePath()
if __name__=="__main__":
	# Render one glyph of a font to a PNG via reportlab.
	import sys
	if len(sys.argv) < 3:
		# Python 2 ``print`` statements converted to the function form so
		# the script also runs under Python 3 (messages unchanged).
		print("Usage: reportLabPen.py <OTF/TTF font> <glyphname> [<image file to create>]")
		print(" If no image file name is created, by default <glyphname>.png is created.")
		print(" example: reportLabPen.py Arial.TTF R test.png")
		print(" (The file format will be PNG, regardless of the image file name supplied)")
		sys.exit(0)

	from fontTools.ttLib import TTFont
	from reportlab.lib import colors
	from reportlab.graphics.shapes import Path

	path = sys.argv[1]
	glyphName = sys.argv[2]
	# Optional third argument: output image file name.
	if (len(sys.argv) > 3):
		imageFile = sys.argv[3]
	else:
		imageFile = "%s.png" % glyphName

	font = TTFont(path)  # it would work just as well with fontTools.t1Lib.T1Font
	gs = font.getGlyphSet()
	pen = ReportLabPen(gs, Path(fillColor=colors.red, strokeWidth=5))
	g = gs[glyphName]
	g.draw(pen)
	w, h = g.width, 1000

	from reportlab.graphics import renderPM
	from reportlab.graphics.shapes import Group, Drawing, scale

	# Everything is wrapped in a group to allow transformations.
	g = Group(pen.path)
	g.translate(0, 200)
	g.scale(0.3, 0.3)
	d = Drawing(w, h)
	d.add(g)
	renderPM.drawToFile(d, imageFile, fmt="PNG")
| {
"repo_name": "shadowmint/nwidget",
"path": "lib/fonttools-2.3/Lib/fontTools/pens/reportLabPen.py",
"copies": "2",
"size": "1747",
"license": "apache-2.0",
"hash": -1895581856898464800,
"line_mean": 25.4696969697,
"line_max": 85,
"alpha_frac": 0.6914710933,
"autogenerated": false,
"ratio": 2.7127329192546585,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4404204012554659,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.basePen import BasePen
def pointToString(pt):
    """Format a point as space-separated coordinate strings, e.g. (10, 20) -> "10 20"."""
    return " ".join(map(str, pt))
class SVGPathPen(BasePen):

    """Pen that converts an outline into an SVG path-data string (the value
    of a <path> element's ``d`` attribute). Retrieve the result with
    getCommands(). Axis-aligned lines are shortened to H/V commands, and a
    lineTo directly after a moveTo uses SVG's implicit-lineTo form.
    """

    def __init__(self, glyphSet):
        BasePen.__init__(self, glyphSet)
        self._commands = []        # accumulated SVG command strings
        self._lastCommand = None   # letter of the most recent command, e.g. "M"
        self._lastX = None         # current point, used for H/V/duplicate detection
        self._lastY = None

    def _handleAnchor(self):
        """Drop an immediately preceding moveTo that drew nothing (an anchor
        point); only the newest moveTo survives.

        >>> pen = SVGPathPen(None)
        >>> pen.moveTo((0, 0))
        >>> pen.moveTo((10, 10))
        >>> pen._commands
        ['M10 10']
        """
        if self._lastCommand == "M":
            self._commands.pop(-1)

    def _moveTo(self, pt):
        """
        >>> pen = SVGPathPen(None)
        >>> pen.moveTo((0, 0))
        >>> pen._commands
        ['M0 0']

        >>> pen = SVGPathPen(None)
        >>> pen.moveTo((10, 0))
        >>> pen._commands
        ['M10 0']

        >>> pen = SVGPathPen(None)
        >>> pen.moveTo((0, 10))
        >>> pen._commands
        ['M0 10']
        """
        self._handleAnchor()
        t = "M%s" % (pointToString(pt))
        self._commands.append(t)
        self._lastCommand = "M"
        self._lastX, self._lastY = pt

    def _lineTo(self, pt):
        """
        # duplicate point
        >>> pen = SVGPathPen(None)
        >>> pen.moveTo((10, 10))
        >>> pen.lineTo((10, 10))
        >>> pen._commands
        ['M10 10']

        # vertical line
        >>> pen = SVGPathPen(None)
        >>> pen.moveTo((10, 10))
        >>> pen.lineTo((10, 0))
        >>> pen._commands
        ['M10 10', 'V0']

        # horizontal line
        >>> pen = SVGPathPen(None)
        >>> pen.moveTo((10, 10))
        >>> pen.lineTo((0, 10))
        >>> pen._commands
        ['M10 10', 'H0']

        # basic
        >>> pen = SVGPathPen(None)
        >>> pen.lineTo((70, 80))
        >>> pen._commands
        ['L70 80']

        # basic following a moveto
        >>> pen = SVGPathPen(None)
        >>> pen.moveTo((0, 0))
        >>> pen.lineTo((10, 10))
        >>> pen._commands
        ['M0 0', ' 10 10']
        """
        x, y = pt
        # duplicate point: emit nothing
        if x == self._lastX and y == self._lastY:
            return
        # vertical line: V shortcut
        elif x == self._lastX:
            cmd = "V"
            pts = str(y)
        # horizontal line: H shortcut
        elif y == self._lastY:
            cmd = "H"
            pts = str(x)
        # previous was a moveto: append bare coordinates (SVG treats
        # coordinates after an M as implicit lineTo's)
        elif self._lastCommand == "M":
            cmd = None
            pts = " " + pointToString(pt)
        # basic
        else:
            cmd = "L"
            pts = pointToString(pt)
        # write the string
        t = ""
        if cmd:
            t += cmd
            self._lastCommand = cmd
        t += pts
        self._commands.append(t)
        # store for future reference
        self._lastX, self._lastY = pt

    def _curveToOne(self, pt1, pt2, pt3):
        """
        >>> pen = SVGPathPen(None)
        >>> pen.curveTo((10, 20), (30, 40), (50, 60))
        >>> pen._commands
        ['C10 20 30 40 50 60']
        """
        t = "C"
        t += pointToString(pt1) + " "
        t += pointToString(pt2) + " "
        t += pointToString(pt3)
        self._commands.append(t)
        self._lastCommand = "C"
        self._lastX, self._lastY = pt3

    def _qCurveToOne(self, pt1, pt2):
        """
        >>> pen = SVGPathPen(None)
        >>> pen.qCurveTo((10, 20), (30, 40))
        >>> pen._commands
        ['Q10 20 30 40']
        """
        # A None on-curve (TrueType special case) is handled by BasePen
        # before it reaches this method.
        assert pt2 is not None
        t = "Q"
        t += pointToString(pt1) + " "
        t += pointToString(pt2)
        self._commands.append(t)
        self._lastCommand = "Q"
        self._lastX, self._lastY = pt2

    def _closePath(self):
        """
        >>> pen = SVGPathPen(None)
        >>> pen.closePath()
        >>> pen._commands
        ['Z']
        """
        self._commands.append("Z")
        self._lastCommand = "Z"
        self._lastX = self._lastY = None

    def _endPath(self):
        """
        >>> pen = SVGPathPen(None)
        >>> pen.endPath()
        >>> pen._commands
        ['Z']
        """
        # NOTE(review): an *open* contour is also closed with 'Z' here,
        # which matches the doctest above but differs from SVG open-path
        # semantics — confirm this is intended.
        self._closePath()
        self._lastCommand = None
        self._lastX = self._lastY = None

    def getCommands(self):
        """Return the accumulated path as a single SVG path-data string."""
        return "".join(self._commands)
if __name__ == "__main__":
    import sys
    import doctest
    # Exit status is the number of failing doctests (0 means success).
    sys.exit(doctest.testmod().failed)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/pens/svgPathPen.py",
"copies": "5",
"size": "4373",
"license": "apache-2.0",
"hash": -6243966834895561000,
"line_mean": 23.8465909091,
"line_max": 53,
"alpha_frac": 0.4440887263,
"autogenerated": false,
"ratio": 3.6655490360435876,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6609637762343588,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.basePen import BasePen
def roundInt(v):
    """Round *v* to the nearest whole number and return it as an int."""
    rounded = round(v)
    return int(rounded)
def roundIntPoint(xy):
    """Round both coordinates of a point to ints; returns a 2-tuple."""
    return roundInt(xy[0]), roundInt(xy[1])
class RelativeCoordinatePen(BasePen):

    """Abstract pen that converts absolute coordinates into coordinates
    relative to the previously emitted point. The initial moveTo is held
    back until the first drawing command so it, too, can be made relative.

    Subclasses must implement _relativeMoveTo, _relativeLineTo and
    _relativeCurveToOne.
    """

    def __init__(self, glyphSet):
        BasePen.__init__(self, glyphSet)
        self._lastX = None            # last absolute x, None until a point is seen
        self._lastY = None            # last absolute y
        self._heldAbsoluteMove = None # pending absolute moveTo point

    def _makePointRelative(self, pt):
        """Return *pt* relative to the stored reference point and record
        *pt* as the new reference. The first point passes through
        unchanged."""
        absX, absY = pt
        # (Removed the no-op self-assignments ``absX = absX`` /
        # ``absY = absY`` present in the original.)
        if self._lastX is None:
            # No points have been added yet, so no conversion is needed.
            relX, relY = absX, absY
        else:
            relX = absX - self._lastX
            relY = absY - self._lastY
        # Store the absolute coordinates for the next conversion.
        self._lastX = absX
        self._lastY = absY
        return relX, relY

    def _moveTo(self, pt):
        # Hold the absolute move; it is released lazily so that anchor
        # moves never reach the output.
        self._heldAbsoluteMove = pt

    def _releaseHeldMove(self):
        if self._heldAbsoluteMove is not None:
            pt = self._makePointRelative(self._heldAbsoluteMove)
            self._relativeMoveTo(pt)
            self._heldAbsoluteMove = None

    def _relativeMoveTo(self, pt):
        raise NotImplementedError

    def _lineTo(self, pt):
        self._releaseHeldMove()
        pt = self._makePointRelative(pt)
        self._relativeLineTo(pt)

    def _relativeLineTo(self, pt):
        raise NotImplementedError

    def _curveToOne(self, pt1, pt2, pt3):
        self._releaseHeldMove()
        pt1 = self._makePointRelative(pt1)
        pt2 = self._makePointRelative(pt2)
        pt3 = self._makePointRelative(pt3)
        self._relativeCurveToOne(pt1, pt2, pt3)

    def _relativeCurveToOne(self, pt1, pt2, pt3):
        raise NotImplementedError
"repo_name": "typesupply/ufo2fdk",
"path": "Lib/ufo2fdk/pens/__init__.py",
"copies": "3",
"size": "1824",
"license": "mit",
"hash": 3833396243699909000,
"line_mean": 26.2388059701,
"line_max": 64,
"alpha_frac": 0.6052631579,
"autogenerated": false,
"ratio": 3.8238993710691824,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5929162528969183,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.basePen import BasePen
from robofab.objects.objectsRF import RFont, RGlyph
from drawBot import *
class DrawBotPen(BasePen):
    """
    A pen that draws a glyph into a drawbot.
    I don't think this can deal with components or anything so be careful.
    """
    def _moveTo(self, coordinate):
        # Forwards to DrawBot's module-level path functions.
        moveTo(coordinate)
    def _lineTo(self, coordinate):
        lineTo(coordinate)
    def _curveToOne(self, coordinate1, coordinate2, coordinate3):
        curveTo(coordinate1, coordinate2, coordinate3)
    def closePath(self):
        # NOTE(review): this overrides the *public* closePath (not
        # _closePath, unlike the other methods). The call resolves to
        # DrawBot's global closePath(), not this method, so there is no
        # recursion — presumably intentional, but confirm.
        closePath()
def drawGlyph(glyph):
    """
    Mimics the in-RoboFont drawbot, but using DrawBotPen.
    """
    newPath()
    glyph.draw(DrawBotPen(glyph.getParent()))
    drawPath()
def _getKernGroups(groups):
    """Split a groups mapping into (leftKernGroups, rightKernGroups).

    A group belongs to a side when its name contains one of that side's
    marker prefixes ('@public.kern1'/'@MMK_L_' for left,
    '@public.kern2'/'@MMK_R_' for right).
    """
    leftKernGroups = {}
    rightKernGroups = {}
    sides = (
        (('@public.kern1', '@MMK_L_'), leftKernGroups),
        (('@public.kern2', '@MMK_R_'), rightKernGroups),
    )
    for groupName, groupGlyphs in groups.items():
        for markers, target in sides:
            if any(marker in groupName for marker in markers):
                target[groupName] = groupGlyphs
    return leftKernGroups, rightKernGroups
def _getGlyphNamesFromTextString(textString, f, showMissing='.notdef'):
    """Map each character of *textString* to a glyph name via the font's
    character mapping. Unmapped characters fall back to *showMissing*
    when the font contains it; otherwise they are silently dropped.
    """
    charMap = f.getCharacterMapping()
    names = []
    for character in textString:
        mapped = charMap.get(ord(character))
        if mapped:
            # getCharacterMapping presumably yields codepoint -> list of
            # glyph names; the first candidate wins. TODO confirm.
            names.append(mapped[0])
        elif showMissing and f.has_key(showMissing):
            names.append(showMissing)
    return names
def ufoText(textString, pos, font, fontSize, showMissing='.notdef', kerning=True, draw=True):
    """
    A function that uses drawGlyph() to draw a string from a robofab object.
    It acts like DrawBot's text() function, but you pass the font and fontSize directly.
    It does not handle any advanced text layout or features. For that you
    should generate a font and use compositor.
    ufoText(u'Your text here.', (0, 0), font=f, fontSize=50, kerning=False)

    Returns (totalWidth, fontSize); totalWidth is in scaled (point) units
    accumulated from glyph advances and kern values.
    """
    # get glyph names
    gnames = _getGlyphNamesFromTextString(textString, font, showMissing)
    # before we begin, get kerning
    # there is probably a better way to do this
    # but for now we will explode kerning once so we don't ahve to
    if kerning:
        explodedKerning = font.kerning.copy()
        explodedKerning.explodeClasses(*_getKernGroups(font.groups))
    save()
    # move to the position
    if draw: translate(*pos)
    save()
    # drawGlyph draws at 1 pt = 1 font unit.
    scaleFactor = fontSize / font.info.unitsPerEm
    if draw: scale(scaleFactor)
    totalWidth = 0
    # loop through glyphs and draw them
    previousGname = None
    for gname in gnames:
        if kerning:
            # NOTE(review): this translate() runs even when draw is False,
            # unlike the draw-guarded calls below — confirm intended.
            kern = explodedKerning.get((previousGname, gname)) or 0
            translate(kern, 0)
            totalWidth += kern
        if draw: drawGlyph(font[gname])
        if draw: translate(font[gname].width, 0)
        totalWidth += font[gname].width
        previousGname = gname
    restore()
    restore()
    return totalWidth, fontSize
def ufoTextSize(textString, font, fontSize, kerning=True):
    """
    Just like textSize, but using a UFO.
    This is pretty dang inefficient, but such is life!

    Returns (totalWidth, fontSize) without drawing anything.
    """
    # Bug fix: ufoText's signature is (textString, pos, font, fontSize, ...).
    # The original call omitted ``pos``, shifting font/fontSize into the
    # wrong parameters; it also dropped the ``kerning`` flag.
    return ufoText(textString, (0, 0), font, fontSize, kerning=kerning, draw=False)
### BY JUST ####
import AppKit
# Map AppKit NSBezierPath element types to the corresponding pen-protocol
# method names used by drawPathToPen below.
_methodMap = {
    AppKit.NSMoveToBezierPathElement: "moveTo",
    AppKit.NSLineToBezierPathElement: "lineTo",
    AppKit.NSCurveToBezierPathElement: "curveTo",
    AppKit.NSClosePathBezierPathElement: "closePath",
}
def drawPathToPen(path, pen):
    """Replay a DrawBot BezierPath (NSBezierPath-backed) into a segment
    pen, calling pen.endPath() for contours that were left open, as the
    pen protocol requires."""
    didClosePath = True
    for i in range(path._path.elementCount()):
        instr, pts = path._path.elementAtIndex_associatedPoints_(i)
        methodName = _methodMap[instr]
        if methodName == "moveTo":
            if not didClosePath:
                # Previous contour was open, we should call pen.endPath()
                pen.endPath()
            didClosePath = False
        elif methodName == "closePath":
            didClosePath = True
        # Forward the element to the pen under its protocol name.
        getattr(pen, methodName)(*pts)
    if not didClosePath:
        # The final subpath is open, we must still call endPath()
        pen.endPath()
| {
"repo_name": "djrrb/drawbotlab",
"path": "glyph.py",
"copies": "1",
"size": "4496",
"license": "mit",
"hash": -1362919271372811000,
"line_mean": 31.3037037037,
"line_max": 93,
"alpha_frac": 0.6212188612,
"autogenerated": false,
"ratio": 3.790893760539629,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49121126217396294,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.boundsPen import ControlBoundsPen, BoundsPen
from fontTools.misc.arrayTools import unionRect
from defcon.pens.clockwiseTestPointPen import ClockwiseTestPointPen
# -----
# Glyph
# -----
def glyphBoundsRepresentationFactory(glyph):
    """Union of the outline bounds of a glyph's contours and components,
    or None when nothing has bounds."""
    combined = None
    for collection in (glyph, glyph.components):
        for item in collection:
            itemBounds = item.bounds
            if itemBounds is None:
                continue
            combined = itemBounds if combined is None else unionRect(combined, itemBounds)
    return combined
def glyphControlPointBoundsRepresentationFactory(glyph):
    """Union of the control-point bounds of a glyph's contours and
    components, or None when nothing has bounds."""
    combined = None
    for collection in (glyph, glyph.components):
        for item in collection:
            itemBounds = item.controlPointBounds
            if itemBounds is None:
                continue
            combined = itemBounds if combined is None else unionRect(combined, itemBounds)
    return combined
# -------
# Contour
# -------
# bounds
def contourBoundsRepresentationFactory(obj):
    """Outline bounds of a contour as (xMin, yMin, xMax, yMax), or None."""
    boundsPen = BoundsPen(None)
    obj.draw(boundsPen)
    return boundsPen.bounds
def contourControlPointBoundsRepresentationFactory(obj):
    """Control-point bounds of a contour (includes off-curve points)."""
    boundsPen = ControlBoundsPen(None)
    obj.draw(boundsPen)
    return boundsPen.bounds
# winding direction
def contourClockwiseRepresentationFactory(contour):
    """True when the contour's winding direction is clockwise."""
    testPen = ClockwiseTestPointPen()
    contour.drawPoints(testPen)
    return testPen.getIsClockwise()
| {
"repo_name": "metapolator/mutatormathtools",
"path": "python_modules/lib/python/defcon/tools/representations.py",
"copies": "2",
"size": "1376",
"license": "apache-2.0",
"hash": 3387940153627618300,
"line_mean": 24.4814814815,
"line_max": 67,
"alpha_frac": 0.6264534884,
"autogenerated": false,
"ratio": 4.220858895705521,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5847312384105521,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.cocoaPen import CocoaPen
from robofab.interface.all.dialogs import GetFile
from robofab.world import OpenFont
import datetime
now = datetime.datetime.now().strftime('%d %B %Y - %H:%M')
def _drawGlyph(glyph):
    """Draw a robofab glyph into the current DrawBot context via CocoaPen."""
    cocoaPen = CocoaPen(glyph.getParent())
    glyph.draw(cocoaPen)
    drawPath(cocoaPen.path)
def pageStamp(pos, pageWidth, thisFont, infoFont):
    """Draw a footer with family/style name, file name and timestamp.

    *pos* is the (x, y) page position of the stamp.
    """
    # Tuple parameters (``def pageStamp((x, y), ...)``) were removed in
    # Python 3 (PEP 3113); unpack inside the body instead.
    x, y = pos
    font(infoFont)
    fileName = thisFont.path.split('/')[-1]
    fontName = thisFont.info.familyName + u' – ' + thisFont.info.styleName
    save()
    fontSize(7)
    fill(0)
    translate(x, y/2)
    column = pageWidth/6
    textBox(fontName, (0, 0, 2*column, 10))
    textBox(fileName, (2*column, 0, 3*column, 10), 'center')
    textBox(now, (5*column, 0, column, 10), 'right')
    restore()
def getGlyphOrder(thisFont):
    """Return the font's glyph order.

    Prefers a ``glyphOrder`` attribute, then the ``public.glyphOrder``
    lib key; returns None when neither is present.
    """
    if hasattr(thisFont, 'glyphOrder'):
        return thisFont.glyphOrder
    # ``dict.has_key`` was removed in Python 3; the ``in`` operator works
    # in both Python 2 and 3.
    elif 'public.glyphOrder' in thisFont.lib:
        return thisFont.lib.get('public.glyphOrder')
    return None
# Ask for a UFO and open it.
ufoPath = GetFile('choose a UFO')
thisFont = OpenFont(ufoPath)

# Page geometry in points (A4 portrait).
pageWidth = 595.276
pageHeight = 841.89

# Toggles for the proof elements drawn per glyph.
showMetrics = True
showName = True
showFrame = False
showCell = False
showGlyphBox = False

# Colors are RGBA tuples.
frameBackgroundColor = (.1, .3, .5, .25)
frameBorderColor = (0, 0, 0, .75)
glyphFillColor = (0, 0, 0, 1)
glyphStrokeColor= (0, 0, 0, 0)
glyphStrokeWidth = 0
cellBackgroundColor = (.1, .3, .5, .25)
cellBorderColor = (0, 0, 0, .5)
glyphNameSize = 7
glyphNameColor = (0, 0, 0, 1)
metricsPointSize = 5
metricsFillColor = (0, 0, 0, 1)
# number of columns
col = 6
# size of each glyph
pointSize = 40
# font to display infos (empty string = DrawBot default)
infoFont = ''
# page margins: left top right bottom
margins = (20, 30, 20, 30)
boxMargins = (0, 0, 0, 0)
innerWidth = pageWidth - (margins[0]+margins[2])
innerHeight = pageHeight - (margins[1]+margins[3])
# Cell height/width ratio; (xLoc, yLoc) place the glyph inside its cell
# (0.5/0.5 = centered).
boxRatio = 1.5
xLoc = 0.5
yLoc = 0.5
boxWidth = innerWidth/col
boxHeight = boxWidth * boxRatio
innerBoxWidth = boxWidth - boxMargins[1] - boxMargins[3]
innerBoxHeight = boxHeight - boxMargins[0] - boxMargins[2]
glyphOrder = getGlyphOrder(thisFont)
fontKeys = thisFont.keys()
# Font vertical metrics, in font units.
UPM = thisFont.info.unitsPerEm
xHeight = thisFont.info.xHeight
capHeight = thisFont.info.capHeight
ascender = thisFont.info.ascender
descender = thisFont.info.descender
# NOTE(review): under Python 2 this is integer division when both operands
# are ints (e.g. 40/1000 == 0) — confirm the script's target interpreter.
sc = pointSize / UPM
glyphsPerPage = col * int(innerHeight/(boxHeight))
# Keep only glyphs that actually exist in the font, in glyph order.
glyphs = [thisFont[glyphName] for glyphName in glyphOrder if glyphName in fontKeys]
# Lay the glyphs out in a col-wide grid, one page per glyphsPerPage glyphs.
for i, glyph in enumerate(glyphs):
    i = i%glyphsPerPage  # index of the glyph on the current page
    if i == 0:
        # Start a new page and stamp the footer.
        newPage(pageWidth, pageHeight)
        pageStamp((margins[0], margins[3]), innerWidth, thisFont, infoFont)
        if showFrame:
            ## page frame
            stroke(*frameBorderColor)
            fill(*frameBackgroundColor)
            rect(margins[0], margins[3], innerWidth, innerHeight)
        # defining margins and starting from top of the page
        translate(margins[0], margins[3]+innerHeight)
    save()
    # Cell position: column from i%col, row from i/col (growing downward).
    lineCount = int(i/col)
    xPos = boxWidth*(i%col)
    yPos = boxHeight*(lineCount+1)
    if showCell:
        ## tracing glyph cell
        save()
        stroke(*cellBorderColor)
        fill(*cellBackgroundColor)
        strokeWidth(.25)
        rect(xPos+boxMargins[3], -yPos+boxMargins[0], innerBoxWidth, innerBoxHeight)
        restore()
    ### glyph position in table
    translate(xPos+boxMargins[3]+((innerBoxWidth*xLoc)-((glyph.width*xLoc)*sc)), -yPos+boxMargins[2]+((innerBoxHeight*(1-yLoc))-(UPM*(1-yLoc)*sc)))
    ## drawing glyph
    ### shifting inside glyph box
    scale(sc)
    translate(0, -descender)
    if showGlyphBox:
        save()
        # bounding box
        stroke(0.75)
        strokeWidth(1)
        rect(0, descender, glyph.width, UPM)
        # baseline
        stroke(0.5, 0.5, 0.75)
        strokeWidth(3)
        line((0, 0), (glyph.width,0))
        # xHeight
        stroke(0.75, 0.5, 0.5)
        strokeWidth(3)
        line((0, xHeight), (glyph.width, xHeight))
        # capHeight
        stroke(0.5, 0.75, 0.5)
        strokeWidth(3)
        line((0, capHeight), (glyph.width, capHeight))
        restore()
    stroke(*glyphStrokeColor)
    strokeWidth(glyphStrokeWidth)
    fill(*glyphFillColor)
    _drawGlyph(glyph)
    if showMetrics:
        # Sidebearing and advance-width figures, drawn unscaled.
        save()
        scale(1/sc)
        stroke()
        fill(*metricsFillColor)
        font(infoFont)
        fontSize(metricsPointSize)
        surrounding = ((innerBoxWidth*xLoc)-((glyph.width*xLoc)*sc))
        textBox(str(int(glyph.leftMargin)), (-surrounding, (xHeight*sc)/2, surrounding, metricsPointSize), align='center')
        textBox(str(int(glyph.rightMargin)), (glyph.width*sc, (xHeight*sc)/2, surrounding, metricsPointSize), align='center')
        textBox(str(int(glyph.width)), (0, ((descender-100)*sc)-metricsPointSize, glyph.width*sc, metricsPointSize), 'center')
        restore()
    restore()
    if showName:
        # Glyph name caption below the cell.
        save()
        translate(xPos, -yPos)
        fill(*glyphNameColor)
        font(infoFont)
        fontSize(glyphNameSize)
        textBox(glyph.name.upper(), (0, boxMargins[2]+glyphNameSize, boxWidth, glyphNameSize), 'center')
        restore()
# Save the proof next to the UFO.
fileName = thisFont.path.split('/')[-1]
ufoFolder = thisFont.path.rstrip(fileName)
saveImage(ufoFolder + fileName + '_CHARSET.pdf')
"repo_name": "loicsander/Font2PDF",
"path": "DrawBot/DB-charset.py",
"copies": "1",
"size": "5316",
"license": "mit",
"hash": -5128658040623836000,
"line_mean": 27.422459893,
"line_max": 147,
"alpha_frac": 0.6392547986,
"autogenerated": false,
"ratio": 3.2089371980676327,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9252490011872433,
"avg_score": 0.01914039695903997,
"num_lines": 187
} |
from fontTools.pens.cocoaPen import CocoaPen
# Some configuration
page_format = 'A4' # See http://drawbot.readthedocs.org/content/canvas/pages.html#size for other size-values
my_selection = CurrentFont() # May also be CurrentFont.selection or else
pdf_filepath = '~/Desktop/AllMyGlyphs.pdf'
# Init
font = CurrentFont()
size(page_format)
page_width = width()
page_height = height()
# Drawbox Settings: the drawable area inside the page margins.
drawbox = {}
drawbox['left_margin'] = 50
drawbox['top_margin'] = 50
drawbox['right_margin'] = 50
drawbox['bottom_margin'] = 50
drawbox['xMin'] = drawbox['left_margin']
drawbox['yMin'] = drawbox['bottom_margin']
drawbox['xMax'] = page_width - drawbox['right_margin']
# Bug fix: yMax must be measured against the page *height*; the original
# subtracted the top margin from page_width (copy-paste error).
drawbox['yMax'] = page_height - drawbox['top_margin']
drawbox['width'] = page_width - drawbox['left_margin'] - drawbox['right_margin']
drawbox['height'] = page_height - drawbox['bottom_margin'] - drawbox['top_margin']
def showPageMargins():
    """Outline the drawable area with a faint blue, unfilled rectangle."""
    box = drawbox
    fill(None)
    stroke(0, 0, 1, .1)
    rect(box['xMin'], box['yMin'], box['width'], box['height'])
class RegisterGlyph(object):

    """Wraps a glyph and draws it (with optional margins/guides) onto its
    own DrawBot page. Relies on module-level globals: drawbox, sc, UPM,
    xHeight, capHeight, descender.
    """

    def __init__(self, glyph):
        self.glyph = glyph
        #print 'Registered glyph:', self.glyph.name
        self.getGlyphSizeData()

    def getGlyphSizeData(self):
        # Outline bounding box and derived sizes, in font units.
        self.xMin, self.yMin, self.xMax, self.yMax = self.glyph.box
        self.w = self.xMax - self.xMin
        self.h = self.yMax - self.yMin
        # Guide positions measured from the bottom of the em box.
        self.xHeight_pos = xHeight + abs(descender)
        self.capHeight_pos = capHeight + abs(descender)
        self.x_pos = self.glyph.leftMargin + drawbox['xMin']
        #print self.xMin, self.yMin, self.xMax, self.yMax

    def getScale(self):
        # NOTE(review): only wide glyphs are scaled down here; the script
        # actually uses the module-level getScale() — confirm this method
        # is still needed.
        if self.w > self.h:
            return drawbox['width']/self.w
        else:
            return 1
            #return drawbox['height']/self.h

    def drawBoundingBox(self):
        # Red rectangle around the glyph's advance width x em height.
        stroke(255,0,0)
        fill(None)
        rect(drawbox['xMin'], drawbox['yMin'], self.glyph.width*sc, UPM*sc)

    def drawXHeight(self):
        stroke(255, 0,0)
        line((drawbox['xMin'], drawbox['yMin'] + self.xHeight_pos*sc), (drawbox['xMin'] + self.glyph.width*sc, drawbox['yMin'] + self.xHeight_pos*sc))

    def drawCapHeight(self):
        stroke(255,0,0)
        line((drawbox['xMin'], drawbox['yMin'] + self.capHeight_pos*sc), (drawbox['xMin'] + self.glyph.width*sc, drawbox['yMin'] + self.capHeight_pos*sc))

    def drawBaseline(self):
        stroke(255, 0, 0)
        line((drawbox['xMin'], drawbox['yMin'] + abs(descender)*sc), (drawbox['xMin'] + self.glyph.width*sc, drawbox['yMin'] + abs(descender)*sc))

    def drawLeftMargin(self):
        # Shade the left sidebearing area.
        stroke(None)
        fill(255,0,0,0.5)
        rect(drawbox['xMin'], drawbox['yMin'], self.glyph.leftMargin*sc, UPM*sc)

    def drawRightMargin(self):
        # Shade the right sidebearing area.
        stroke(None)
        fill(255,0,0,0.5)
        rect(drawbox['xMin'] + (self.glyph.width - self.glyph.rightMargin)*sc, drawbox['yMin'], self.glyph.rightMargin*sc, UPM*sc)
        #rect((drawbox['xMax'] - self.glyph.rightMargin)*sc, drawbox['yMin'], self.glyph.width*sc, UPM*sc)

    def addNewPage(self):
        newPage()
        showPageMargins()

    def drawGlyph(self):
        save()
        stroke(None)
        fill(0)
        # Origin: left margin, lifted so the descender fits inside the box.
        translate(drawbox['xMin'], 0)
        translate(0, drawbox['yMin'] + abs(descender)*sc)
        scale(sc)
        self._drawGlyph()
        restore()

    def center(self, horizontal=True, vertical=True):
        # Shift the coordinate system so the em box is centered on the page.
        offset_x = 0
        offset_y = 0
        if horizontal:
            offset_x = (drawbox['width'] - self.glyph.width*sc)/2
        if vertical:
            offset_y = (drawbox['height'] - UPM*sc)/2
        translate(offset_x, offset_y)

    def _drawGlyph(self):
        # Convert the outline to an NSBezierPath and draw it.
        pen = CocoaPen(self.glyph.getParent())
        self.glyph.draw(pen)
        drawPath(pen.path)
def getMaxWidth():
    """Return the widest advance width among the selected glyphs (0 if none)."""
    widest = 0
    for glyph in my_selection:
        widest = max(widest, glyph.width)
    return widest
def getScale():
    """Scale factor so the widest selected glyph fills the drawbox width.

    The glyphs should be displayed as big as possible, so the widest
    glyph is the base for the scaling.
    """
    return drawbox['width']/max_width
# Global scale and font metrics shared by RegisterGlyph.
max_width = getMaxWidth()
sc = getScale()
UPM = font.info.unitsPerEm
xHeight = font.info.xHeight
capHeight = font.info.capHeight
ascender = font.info.ascender
descender = font.info.descender

# One page per non-empty glyph in the selection.
for g in my_selection:
    if len(g) > 0: # Ignore whitespace glyphs
        glyph = RegisterGlyph(g)
        glyph.addNewPage()
        # Comment out any of the following calls to skip drawing that element
        glyph.center(True, True)
        glyph.drawLeftMargin()
        glyph.drawRightMargin()
        glyph.drawGlyph()
        glyph.drawBoundingBox()
        glyph.drawBaseline()
        glyph.drawXHeight()
        glyph.drawCapHeight()
| {
"repo_name": "AlphabetType/DrawBot-Scripts",
"path": "createGlyphsPDF.py",
"copies": "1",
"size": "4942",
"license": "mit",
"hash": -2610880716260796400,
"line_mean": 28.9515151515,
"line_max": 154,
"alpha_frac": 0.6011736139,
"autogenerated": false,
"ratio": 3.316778523489933,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9233247073895681,
"avg_score": 0.03694101269885026,
"num_lines": 165
} |
from fontTools.pens.filterPen import FilterPen, FilterPointPen
__all__ = ["TransformPen"]
class TransformPen(FilterPen):

	"""Pen that applies an affine transformation to every coordinate it
	receives and forwards the transformed calls to another pen.
	"""

	def __init__(self, outPen, transformation):
		"""``outPen`` is another pen object that receives the transformed
		coordinates. ``transformation`` may be a six-tuple or a
		fontTools.misc.transform.Transform instance.
		"""
		super(TransformPen, self).__init__(outPen)
		if not hasattr(transformation, "transformPoint"):
			from fontTools.misc.transform import Transform
			transformation = Transform(*transformation)
		self._transformation = transformation
		self._transformPoint = transformation.transformPoint
		self._stack = []

	def moveTo(self, pt):
		self._outPen.moveTo(self._transformPoint(pt))

	def lineTo(self, pt):
		self._outPen.lineTo(self._transformPoint(pt))

	def curveTo(self, *points):
		self._outPen.curveTo(*self._transformPoints(points))

	def qCurveTo(self, *points):
		# A trailing None (TrueType "implied on-curve" marker) must be
		# passed through untransformed.
		if points[-1] is None:
			transformed = self._transformPoints(points[:-1])
			transformed.append(None)
		else:
			transformed = self._transformPoints(points)
		self._outPen.qCurveTo(*transformed)

	def _transformPoints(self, points):
		transform = self._transformPoint
		return [transform(pt) for pt in points]

	def closePath(self):
		self._outPen.closePath()

	def endPath(self):
		self._outPen.endPath()

	def addComponent(self, glyphName, transformation):
		# Compose the component's transformation with this pen's own.
		combined = self._transformation.transform(transformation)
		self._outPen.addComponent(glyphName, combined)
class TransformPointPen(FilterPointPen):

	"""PointPen that applies an affine transformation to every coordinate
	and forwards the calls to another PointPen.

	>>> from fontTools.pens.recordingPen import RecordingPointPen
	>>> rec = RecordingPointPen()
	>>> pen = TransformPointPen(rec, (2, 0, 0, 2, -10, 5))
	>>> v = iter(rec.value)
	>>> pen.beginPath(identifier="contour-0")
	>>> next(v)
	('beginPath', (), {'identifier': 'contour-0'})
	>>> pen.addPoint((100, 100), "line")
	>>> next(v)
	('addPoint', ((190, 205), 'line', False, None), {})
	>>> pen.endPath()
	>>> next(v)
	('endPath', (), {})
	>>> pen.addComponent("a", (1, 0, 0, 1, -10, 5), identifier="component-0")
	>>> next(v)
	('addComponent', ('a', <Transform [2 0 0 2 -30 15]>), {'identifier': 'component-0'})
	"""

	def __init__(self, outPointPen, transformation):
		"""``outPointPen`` receives the transformed coordinates.
		``transformation`` may be a six-tuple or a
		fontTools.misc.transform.Transform instance.
		"""
		super().__init__(outPointPen)
		if not hasattr(transformation, "transformPoint"):
			from fontTools.misc.transform import Transform
			transformation = Transform(*transformation)
		self._transformation = transformation
		self._transformPoint = transformation.transformPoint

	def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
		transformed = self._transformPoint(pt)
		self._outPen.addPoint(transformed, segmentType, smooth, name, **kwargs)

	def addComponent(self, baseGlyphName, transformation, **kwargs):
		# Compose the component's transformation with this pen's own.
		combined = self._transformation.transform(transformation)
		self._outPen.addComponent(baseGlyphName, combined, **kwargs)
if __name__ == "__main__":
	# Manual smoke test: _TestPen prints the transformed drawing commands.
	from fontTools.pens.basePen import _TestPen
	pen = TransformPen(_TestPen(None), (2, 0, 0.5, 2, -10, 0))
	pen.moveTo((0, 0))
	pen.lineTo((0, 100))
	pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0))
	pen.closePath()
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/pens/transformPen.py",
"copies": "3",
"size": "3560",
"license": "apache-2.0",
"hash": -158193839039064800,
"line_mean": 31.962962963,
"line_max": 85,
"alpha_frac": 0.708988764,
"autogenerated": false,
"ratio": 3.3427230046948355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5551711768694836,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.pointPen import AbstractPointPen
from warnings import warn
class TransformPointPen(AbstractPointPen):

    """PointPen that transforms every coordinate before forwarding it to
    another PointPen; component transformations are composed as well.
    """

    def __init__(self, outPen, transformation):
        if not hasattr(transformation, "transformPoint"):
            from fontTools.misc.transform import Transform
            transformation = Transform(*transformation)
        self._transformation = transformation
        self._transformPoint = transformation.transformPoint
        self._outPen = outPen
        self._stack = []

    def beginPath(self, identifier=None):
        self._outPen.beginPath(identifier=identifier)

    def endPath(self):
        self._outPen.endPath()

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
        transformed = self._transformPoint(pt)
        self._outPen.addPoint(transformed, segmentType, smooth, name, **kwargs)

    def addComponent(self, glyphName, transformation, identifier=None):
        combined = self._transformation.transform(transformation)
        # Older out-pens do not accept the identifier argument; fall back
        # and warn so callers know the identifier was dropped.
        try:
            self._outPen.addComponent(glyphName, combined, identifier)
        except TypeError:
            self._outPen.addComponent(glyphName, combined)
            warn("The addComponent method needs an identifier kwarg. The component's identifier value has been discarded.", DeprecationWarning)
| {
"repo_name": "moyogo/defcon",
"path": "Lib/defcon/pens/transformPointPen.py",
"copies": "1",
"size": "1483",
"license": "mit",
"hash": -1326221989344743400,
"line_mean": 40.1944444444,
"line_max": 143,
"alpha_frac": 0.7019554956,
"autogenerated": false,
"ratio": 4.768488745980707,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005358367626886146,
"num_lines": 36
} |
from fontTools.pens.pointPen import AbstractPointPen
class GlyphObjectPointPen(AbstractPointPen):

    """PointPen that builds contours and components directly onto a defcon
    Glyph object. Set ``skipConflictingIdentifiers`` to silently drop
    identifiers that are already used in the glyph.
    """

    def __init__(self, glyph):
        self._glyph = glyph
        self._contour = None
        self.skipConflictingIdentifiers = False

    def _identifierInUse(self, identifier):
        # Internal helper: True when the identifier clashes and clashes
        # should be skipped.
        return self.skipConflictingIdentifiers and identifier in self._glyph.identifiers

    def beginPath(self, identifier=None, **kwargs):
        self._contour = self._glyph.instantiateContour()
        self._contour.disableNotifications()
        if identifier is not None and not self._identifierInUse(identifier):
            self._contour.identifier = identifier

    def endPath(self):
        contour = self._contour
        contour.dirty = False
        self._glyph.appendContour(contour)
        contour.enableNotifications()
        self._contour = None

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
        if self._identifierInUse(identifier):
            identifier = None
        self._contour.addPoint(pt, segmentType, smooth, name, identifier=identifier)

    def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs):
        if self._identifierInUse(identifier):
            identifier = None
        component = self._glyph.instantiateComponent()
        component.baseGlyph = baseGlyphName
        component.transformation = transformation
        component.identifier = identifier
        self._glyph.appendComponent(component)
class GlyphObjectLoadingPointPen(GlyphObjectPointPen):

    """Variant used during lazy glyph loading: instead of building real
    Contour objects it appends lightweight dicts (points + identifier) to
    the glyph's _shallowLoadedContours list."""

    def __init__(self, glyph):
        super(GlyphObjectLoadingPointPen, self).__init__(glyph)
        self._contours = glyph._shallowLoadedContours

    def beginPath(self, identifier=None, **kwargs):
        contour = dict(points=[])
        if identifier is not None and self.skipConflictingIdentifiers and identifier in self._glyph.identifiers:
            identifier = None
        if identifier is not None:
            if identifier in self._glyph.identifiers:
                # NOTE(review): DefconError is not imported in this module's
                # visible header (only AbstractPointPen is) — raising here
                # would be a NameError. Verify the file's imports.
                raise DefconError("The contour identifier (%s) is already used." % identifier)
            # FIXME: we should do self._glyph.identifiers.add(identifier)
            # otherwise the shallow contours could define the same identifier multiple times
            # or even between shallow loading and real loading something else could
            # take the identifier. The check above is pretty much worthless
            # without storing the identifier.
            contour["identifier"] = identifier
        self._contours.append(contour)

    def endPath(self):
        # Nothing to finalize for shallow contours.
        pass

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
        # Record the point as (args, kwargs) for later replay into a real
        # contour.
        args = (pt,)
        kwargs = dict(
            segmentType=segmentType,
            smooth=smooth,
            name=name
        )
        if identifier is not None and self.skipConflictingIdentifiers and identifier in self._glyph.identifiers:
            identifier = None
        if identifier is not None:
            if identifier in self._glyph.identifiers:
                # NOTE(review): same unimported DefconError as above.
                raise DefconError("The contour identifier (%s) is already used." % identifier)
            # FIXME: we should do self._glyph.identifiers.add(identifier)
            # otherwise the shallow contours could define the same identifier multiple times
            # or even between shallow loading and real loading something else could
            # take the identifier. The check above is pretty much worthless
            # without storing the identifier.
            kwargs["identifier"] = identifier
        self._contours[-1]["points"].append((args, kwargs))
| {
"repo_name": "moyogo/defcon",
"path": "Lib/defcon/pens/glyphObjectPointPen.py",
"copies": "2",
"size": "3694",
"license": "mit",
"hash": -302152606674942400,
"line_mean": 44.0487804878,
"line_max": 112,
"alpha_frac": 0.6616134272,
"autogenerated": false,
"ratio": 4.816166883963494,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6477780311163494,
"avg_score": null,
"num_lines": null
} |
from fontTools.pens.pointPen import AbstractPointPen
class TransformPointPen(AbstractPointPen):

    """PointPen that transforms all coordinates, and passes them to another
    PointPen. It also transforms the transformation given to addComponent().
    """

    def __init__(self, outPen, transformation):
        # Accept either a Transform object or a six-tuple.
        if not hasattr(transformation, "transformPoint"):
            from fontTools.misc.transform import Transform
            transformation = Transform(*transformation)
        self._transformation = transformation
        self._transformPoint = transformation.transformPoint
        self._outPen = outPen
        self._stack = []

    def beginPath(self, identifier=None):
        self._outPen.beginPath(identifier=identifier)

    def endPath(self):
        self._outPen.endPath()

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
        pt = self._transformPoint(pt)
        self._outPen.addPoint(pt, segmentType, smooth, name, **kwargs)

    def addComponent(self, glyphName, transformation):
        # Compose the component's transformation with this pen's own.
        transformation = self._transformation.transform(transformation)
        self._outPen.addComponent(glyphName, transformation)
| {
"repo_name": "typesupply/defcon",
"path": "Lib/defcon/pens/transformPointPen.py",
"copies": "1",
"size": "1067",
"license": "mit",
"hash": -595172286620103200,
"line_mean": 34.5666666667,
"line_max": 77,
"alpha_frac": 0.771321462,
"autogenerated": false,
"ratio": 3.838129496402878,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5109450958402878,
"avg_score": null,
"num_lines": null
} |
from fontTools.t1Lib import T1Font, T1Error
from fontTools.agl import AGL2UV
from fontTools.misc.psLib import PSInterpreter
from fontTools.misc.transform import Transform
from extractor.tools import RelaxedInfo
# specification: http://partners.adobe.com/public/developer/en/font/T1_SPEC.PDF
# ----------------
# Public Functions
# ----------------
def isType1(pathOrFile):
    """Return True if *pathOrFile* parses as a Type 1 font, False otherwise."""
    try:
        T1Font(pathOrFile)
    except T1Error:
        return False
    return True
def extractFontFromType1(pathOrFile, destination, doGlyphs=True, doInfo=True, doKerning=True, customFunctions=None):
    """Extract data from a Type 1 font into *destination* (a UFO-like font).

    Args:
        pathOrFile: path to (or file object for) the Type 1 font.
        destination: the font object to populate.
        doGlyphs: extract glyph outlines and metrics.
        doInfo: extract font info.
        doKerning: currently a no-op (see comment below).
        customFunctions: optional sequence of callables invoked as
            function(source, destination) after extraction.
    """
    # BUG FIX: customFunctions previously defaulted to a mutable list ([]),
    # which is shared across calls; use None as the sentinel instead.
    if customFunctions is None:
        customFunctions = []
    source = T1Font(pathOrFile)
    destination.lib["public.glyphOrder"] = _extractType1GlyphOrder(source)
    if doInfo:
        extractType1Info(source, destination)
    if doGlyphs:
        extractType1Glyphs(source, destination)
    if doKerning:
        # kerning extraction is not supported yet.
        # in theory, it could be retrieved from an AFM.
        # we need to find the AFM naming rules so that we can sniff for the file.
        pass
    for function in customFunctions:
        function(source, destination)
def extractType1Info(source, destination):
    """Populate *destination*'s info object from the Type 1 *source* font."""
    relaxed = RelaxedInfo(destination.info)
    # order matters only for readability; each helper touches distinct attrs
    for extract in (_extractType1FontInfo, _extractType1Private, _extractType1FontMatrix):
        extract(source, relaxed)
# ----
# Info
# ----
def _extractType1FontInfo(source, info):
    """Copy values from the font's /FontInfo dict (and FontName) to *info*.

    Args:
        source: subscriptable font object exposing "FontName" and "FontInfo".
        info: RelaxedInfo-style object accepting plain attribute assignment.
    """
    sourceInfo = source["FontInfo"]
    # FontName lives on the font itself, not inside FontInfo
    info.postscriptFontName = source["FontName"]
    # version
    version = sourceInfo.get("version")
    if version is not None:
        # the spec says that version will be a string and no formatting info
        # is given. so, only move forward if the string can actually be parsed.
        try:
            # normalize to a fixed "major.minor" form, then split and convert.
            # (the previous code converted each part to int twice; once is enough)
            versionMajor, versionMinor = ("%.3f" % float(version)).split(".")
            info.versionMajor = int(versionMajor)
            info.versionMinor = int(versionMinor)
        except ValueError:
            # couldn't parse. leave the object with the default values.
            pass
    # Notice
    notice = sourceInfo.get("Notice")
    if notice:
        info.copyright = notice
    # FullName
    fullName = sourceInfo.get("FullName")
    if fullName:
        info.postscriptFullName = fullName
    # FamilyName
    familyName = sourceInfo.get("FamilyName")
    if familyName:
        info.familyName = familyName
    # Weight
    postscriptWeightName = sourceInfo.get("Weight")
    if postscriptWeightName:
        info.postscriptWeightName = postscriptWeightName
    # ItalicAngle (may legitimately be None when absent)
    info.italicAngle = sourceInfo.get("ItalicAngle")
    # IsFixedPitch
    info.postscriptIsFixedPitch = sourceInfo.get("isFixedPitch")
    # UnderlinePosition/Thickness
    info.postscriptUnderlinePosition = sourceInfo.get("UnderlinePosition")
    info.postscriptUnderlineThickness = sourceInfo.get("UnderlineThickness")
def _extractType1FontMatrix(source, info):
    """Derive units-per-em from the FontMatrix (e.g. 0.001 scale -> 1000 upm)."""
    inverted = Transform(*source["FontMatrix"]).inverse()
    info.unitsPerEm = int(round(inverted[3]))
def _extractType1Private(source, info):
    """Copy hinting-related values from the font's /Private dict onto *info*."""
    private = source["Private"]
    # (private key, info attribute, fallback), in the order they are written
    for key, attr, fallback in (
        ("UniqueID", "openTypeNameUniqueID", None),
        ("BlueValues", "postscriptBlueValues", []),
        ("OtherBlues", "postscriptOtherBlues", []),
        ("FamilyBlues", "postscriptFamilyBlues", []),
        ("FamilyOtherBlues", "postscriptFamilyOtherBlues", []),
        ("BlueScale", "postscriptBlueScale", None),
        ("BlueShift", "postscriptBlueShift", None),
        ("BlueFuzz", "postscriptBlueFuzz", None),
        ("StemSnapH", "postscriptStemSnapH", []),
        ("StemSnapV", "postscriptStemSnapV", []),
    ):
        setattr(info, attr, private.get(key, fallback))
    # ForceBold is coerced to a plain bool; absent means False
    info.postscriptForceBold = bool(private.get("ForceBold", None))
# --------
# Outlines
# --------
def extractType1Glyphs(source, destination):
    """Copy every glyph (outline, width, guessed unicode) into *destination*."""
    glyphSet = source.getGlyphSet()
    for glyphName in sorted(glyphSet.keys()):
        sourceGlyph = glyphSet[glyphName]
        destination.newGlyph(glyphName)
        newGlyph = destination[glyphName]
        # outline
        sourceGlyph.draw(newGlyph.getPen())
        # metrics
        newGlyph.width = sourceGlyph.width
        # the unicode value is synthesized from the AGL name, when possible
        newGlyph.unicode = AGL2UV.get(glyphName)
# -----------
# Glyph order
# -----------
class GlyphOrderPSInterpreter(PSInterpreter):
    """PostScript interpreter that records glyph-name literals in the order
    they appear after the /CharStrings token, yielding the font's glyph order.
    """

    def __init__(self):
        PSInterpreter.__init__(self)
        self.glyphOrder = []  # glyph names, in order of appearance
        self.collectTokenForGlyphOrder = False  # True while inside CharStrings

    def do_literal(self, token):
        result = PSInterpreter.do_literal(self, token)
        # NOTE(review): /FontName is treated as the stop marker that ends
        # collection — presumably it follows the CharStrings dict in the
        # font program; confirm against the Type 1 spec.
        if token == "/FontName":
            self.collectTokenForGlyphOrder = False
        if self.collectTokenForGlyphOrder:
            self.glyphOrder.append(result.value)
        # /CharStrings starts collection; because the flag is checked before
        # it is set, the /CharStrings token itself is not collected.
        if token == "/CharStrings":
            self.collectTokenForGlyphOrder = True
        return result
def _extractType1GlyphOrder(t1Font):
    """Interpret the font program and return the glyph order it declares."""
    collector = GlyphOrderPSInterpreter()
    collector.interpret(t1Font.data)
    return collector.glyphOrder
| {
"repo_name": "typesupply/extractor",
"path": "Lib/extractor/formats/type1.py",
"copies": "3",
"size": "5607",
"license": "mit",
"hash": 6081547244913846000,
"line_mean": 33.1890243902,
"line_max": 114,
"alpha_frac": 0.6688068486,
"autogenerated": false,
"ratio": 3.869565217391304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015120178699149782,
"num_lines": 164
} |
from fontTools.ttLib import getSearchRange
from fontTools.misc.textTools import safeEval, readHex
from fontTools.misc.fixedTools import (
fixedToFloat as fi2fl,
floatToFixed as fl2fi)
from . import DefaultTable
import struct
import sys
import array
import logging
log = logging.getLogger(__name__)
class table__k_e_r_n(DefaultTable.DefaultTable):
    """The 'kern' table: a list of kerning subtables.

    Supports both the OpenType header (version 0, 16-bit fields) and the
    Apple AAT header (version 1.0, 32-bit fields, 16.16 fixed version).
    """

    def getkern(self, format):
        """Return the first subtable with the given format, or None."""
        for subtable in self.kernTables:
            if subtable.format == format:
                return subtable
        return None  # not found

    def decompile(self, data, ttFont):
        """Parse the binary table into self.kernTables."""
        version, nTables = struct.unpack(">HH", data[:4])
        apple = False
        if (len(data) >= 8) and (version == 1):
            # AAT Apple's "new" format. Hm.
            # Apple uses a 16.16 fixed version and a 32-bit table count.
            version, nTables = struct.unpack(">LL", data[:8])
            self.version = fi2fl(version, 16)
            data = data[8:]
            apple = True
        else:
            self.version = version
            data = data[4:]
        self.kernTables = []
        for i in range(nTables):
            if self.version == 1.0:
                # Apple
                length, coverage, subtableFormat = struct.unpack(
                    ">LBB", data[:6])
            else:
                # in OpenType spec the "version" field refers to the common
                # subtable header; the actual subtable format is stored in
                # the 8-15 mask bits of "coverage" field.
                # This "version" is always 0 so we ignore it here
                _, length, subtableFormat, coverage = struct.unpack(
                    ">HHBB", data[:6])
                if nTables == 1 and subtableFormat == 0:
                    # The "length" value is ignored since some fonts
                    # (like OpenSans and Calibri) have a subtable larger than
                    # its value.
                    nPairs, = struct.unpack(">H", data[6:8])
                    calculated_length = (nPairs * 6) + 14
                    if length != calculated_length:
                        log.warning(
                            "'kern' subtable longer than defined: "
                            "%d bytes instead of %d bytes" %
                            (calculated_length, length)
                        )
                    length = calculated_length
            if subtableFormat not in kern_classes:
                # unknown formats are preserved round-trip as raw bytes
                subtable = KernTable_format_unkown(subtableFormat)
            else:
                subtable = kern_classes[subtableFormat](apple)
            subtable.decompile(data[:length], ttFont)
            self.kernTables.append(subtable)
            data = data[length:]

    def compile(self, ttFont):
        """Serialize the table header followed by each subtable."""
        if hasattr(self, "kernTables"):
            nTables = len(self.kernTables)
        else:
            nTables = 0
        if self.version == 1.0:
            # AAT Apple's "new" format.
            data = struct.pack(">LL", fl2fi(self.version, 16), nTables)
        else:
            data = struct.pack(">HH", self.version, nTables)
        if hasattr(self, "kernTables"):
            for subtable in self.kernTables:
                data = data + subtable.compile(ttFont)
        return data

    def toXML(self, writer, ttFont):
        writer.simpletag("version", value=self.version)
        writer.newline()
        for subtable in self.kernTables:
            subtable.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":
            self.version = safeEval(attrs["value"])
            return
        if name != "kernsubtable":
            return
        if not hasattr(self, "kernTables"):
            self.kernTables = []
        format = safeEval(attrs["format"])
        if format not in kern_classes:
            subtable = KernTable_format_unkown(format)
        else:
            # version must already be set via the "version" element above
            apple = self.version == 1.0
            subtable = kern_classes[format](apple)
        self.kernTables.append(subtable)
        subtable.fromXML(name, attrs, content, ttFont)
class KernTable_format_0(object):
    """Format 0 'kern' subtable: a sorted list of glyph-pair kern values.

    Handles both the OpenType/Windows subtable header and, when constructed
    with apple=True, the AAT (kern table version 1.0) header layout.
    """

    # 'version' is kept for backward compatibility
    version = format = 0

    def __init__(self, apple=False):
        # selects the AAT header layout in decompile()/compile()
        self.apple = apple

    def decompile(self, data, ttFont):
        if not self.apple:
            # OpenType header: version, length, format, coverage
            version, length, subtableFormat, coverage = struct.unpack(
                ">HHBB", data[:6])
            if version != 0:
                from fontTools.ttLib import TTLibError
                raise TTLibError(
                    "unsupported kern subtable version: %d" % version)
            tupleIndex = None
            # Should we also assert length == len(data)?
            data = data[6:]
        else:
            # AAT header: length, coverage, format, tupleIndex
            length, coverage, subtableFormat, tupleIndex = struct.unpack(
                ">LBBH", data[:8])
            data = data[8:]
        assert self.format == subtableFormat, "unsupported format"
        self.coverage = coverage
        self.tupleIndex = tupleIndex
        self.kernTable = kernTable = {}
        nPairs, searchRange, entrySelector, rangeShift = struct.unpack(
            ">HHHH", data[:8])
        data = data[8:]
        # read all pairs in one go as uint16 triplets (left, right, value)
        datas = array.array("H", data[:6 * nPairs])
        if sys.byteorder != "big": datas.byteswap()
        it = iter(datas)
        glyphOrder = ttFont.getGlyphOrder()
        for k in range(nPairs):
            left, right, value = next(it), next(it), next(it)
            # the value is a signed 16-bit quantity; undo the unsigned read
            if value >= 32768:
                value -= 65536
            try:
                kernTable[(glyphOrder[left], glyphOrder[right])] = value
            except IndexError:
                # Slower, but will not throw an IndexError on an invalid
                # glyph id.
                kernTable[(
                    ttFont.getGlyphName(left),
                    ttFont.getGlyphName(right))] = value
        if len(data) > 6 * nPairs + 4:  # Ignore up to 4 bytes excess
            log.warning(
                "excess data in 'kern' subtable: %d bytes",
                len(data) - 6 * nPairs)

    def compile(self, ttFont):
        nPairs = len(self.kernTable)
        searchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6)
        # clamp to a uint16 so struct.pack below cannot overflow
        searchRange &= 0xFFFF
        data = struct.pack(
            ">HHHH", nPairs, searchRange, entrySelector, rangeShift)
        # yeehee! (I mean, turn names into indices)
        try:
            reverseOrder = ttFont.getReverseGlyphMap()
            kernTable = sorted(
                (reverseOrder[left], reverseOrder[right], value)
                for ((left, right), value) in self.kernTable.items())
        except KeyError:
            # Slower, but will not throw KeyError on invalid glyph id.
            getGlyphID = ttFont.getGlyphID
            kernTable = sorted(
                (getGlyphID(left), getGlyphID(right), value)
                for ((left, right), value) in self.kernTable.items())
        for left, right, value in kernTable:
            data = data + struct.pack(">HHh", left, right, value)
        if not self.apple:
            version = 0
            length = len(data) + 6
            if length >= 0x10000:
                log.warning('"kern" subtable overflow, '
                            'truncating length value while preserving pairs.')
                length &= 0xFFFF
            header = struct.pack(
                ">HHBB", version, length, self.format, self.coverage)
        else:
            if self.tupleIndex is None:
                # sensible default when compiling a TTX from an old fonttools
                # or when inserting a Windows-style format 0 subtable into an
                # Apple version=1.0 kern table
                log.warning("'tupleIndex' is None; default to 0")
                self.tupleIndex = 0
            length = len(data) + 8
            header = struct.pack(
                ">LBBH", length, self.coverage, self.format, self.tupleIndex)
        return header + data

    def toXML(self, writer, ttFont):
        attrs = dict(coverage=self.coverage, format=self.format)
        if self.apple:
            if self.tupleIndex is None:
                log.warning("'tupleIndex' is None; default to 0")
                attrs["tupleIndex"] = 0
            else:
                attrs["tupleIndex"] = self.tupleIndex
        writer.begintag("kernsubtable", **attrs)
        writer.newline()
        items = sorted(self.kernTable.items())
        for (left, right), value in items:
            writer.simpletag("pair", [
                ("l", left),
                ("r", right),
                ("v", value)
            ])
            writer.newline()
        writer.endtag("kernsubtable")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.coverage = safeEval(attrs["coverage"])
        subtableFormat = safeEval(attrs["format"])
        if self.apple:
            if "tupleIndex" in attrs:
                self.tupleIndex = safeEval(attrs["tupleIndex"])
            else:
                # previous fontTools versions didn't export tupleIndex
                log.warning(
                    "Apple kern subtable is missing 'tupleIndex' attribute")
                self.tupleIndex = None
        else:
            self.tupleIndex = None
        assert subtableFormat == self.format, "unsupported format"
        if not hasattr(self, "kernTable"):
            self.kernTable = {}
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            self.kernTable[(attrs["l"], attrs["r"])] = safeEval(attrs["v"])

    # dict-like convenience access to the pair table

    def __getitem__(self, pair):
        return self.kernTable[pair]

    def __setitem__(self, pair, value):
        self.kernTable[pair] = value

    def __delitem__(self, pair):
        del self.kernTable[pair]
class KernTable_format_unkown(object):
    """Fallback container that round-trips an unrecognised 'kern' subtable
    format as opaque binary data."""

    def __init__(self, format):
        self.format = format

    def decompile(self, data, ttFont):
        # the format is unknown, so keep the raw bytes untouched
        self.data = data

    def compile(self, ttFont):
        return self.data

    def toXML(self, writer, ttFont):
        writer.begintag("kernsubtable", format=self.format)
        writer.newline()
        writer.comment("unknown 'kern' subtable format")
        writer.newline()
        writer.dumphex(self.data)
        writer.endtag("kernsubtable")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.decompile(readHex(content), ttFont)
# registry of known 'kern' subtable formats, consulted by table__k_e_r_n
kern_classes = {0: KernTable_format_0}
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/tables/_k_e_r_n.py",
"copies": "5",
"size": "8423",
"license": "apache-2.0",
"hash": -317145040945890400,
"line_mean": 28.6584507042,
"line_max": 68,
"alpha_frac": 0.674581503,
"autogenerated": false,
"ratio": 3.102394106813996,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0331793935060221,
"num_lines": 284
} |
from fontTools.ttLib import TTFont, newTable
from fontTools.ttLib.tables._k_e_r_n import KernTable_format_0, KernTable_format_unkown
from fontbakery.checkrunner import INFO, FAIL, WARN
from fontbakery.codetesting import (assert_PASS,
assert_results_contain,
CheckTester,
TEST_FILE)
from fontbakery.profiles import opentype as opentype_profile
def test_check_kern_table():
    """ Is there a "kern" table declared in the font? """
    check = CheckTester(opentype_profile,
                        "com.google.fonts/check/kern_table")

    # Mada Regular ships without a 'kern' table, so it is a known-good
    # starting point and must PASS:
    ttFont = TTFont(TEST_FILE("mada/Mada-Regular.ttf"))
    assert_PASS(check(ttFont),
                'with a font without a "kern" table...')

    # Attach a minimal format-0 'kern' table with a single A/V pair.
    kernTable = newTable("kern")
    kernTable.version = 0
    fmt0 = KernTable_format_0()
    fmt0.coverage = 1
    fmt0.version = 0
    fmt0.kernTable = {("A", "V"): -50}
    kernTable.kernTables = [fmt0]
    ttFont["kern"] = kernTable

    # its presence must be reported as an INFO:
    assert_results_contain(check(ttFont),
                           INFO, 'kern-found',
                           'with a font containing a "kern" table...')

    # a pair that references a non-character glyph must FAIL:
    fmt0.kernTable[("A", "four.dnom")] = -50
    assert_results_contain(check(ttFont),
                           FAIL, 'kern-non-character-glyphs',
                           'The following glyphs should not be used...')

    # and an unknown-format subtable must WARN:
    kernTable.kernTables = [KernTable_format_unkown(2)]
    assert_results_contain(check(ttFont),
                           WARN, 'kern-unknown-format',
                           'The "kern" table does not have any format-0 subtable...')
| {
"repo_name": "googlefonts/fontbakery",
"path": "tests/profiles/kern_test.py",
"copies": "2",
"size": "2009",
"license": "apache-2.0",
"hash": -811473691348545900,
"line_mean": 38.3921568627,
"line_max": 88,
"alpha_frac": 0.5893479343,
"autogenerated": false,
"ratio": 3.841300191204589,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5430648125504589,
"avg_score": null,
"num_lines": null
} |
from fontTools.ttLib import TTFont
from fontTools.feaLib.builder import addOpenTypeFeatures, Builder
from fontTools.feaLib.error import FeatureLibError
from fontTools import configLogger
from fontTools.misc.cliTools import makeOutputFileName
import sys
import argparse
import logging
log = logging.getLogger("fontTools.feaLib")
def main(args=None):
    """Add features from a feature file (.fea) into a OTF font.

    Returns None on success, or 1 when feature compilation fails; the
    value is handed to sys.exit() by the __main__ guard.
    """
    parser = argparse.ArgumentParser(
        description="Use fontTools to compile OpenType feature files (*.fea)."
    )
    parser.add_argument(
        "input_fea", metavar="FEATURES", help="Path to the feature file"
    )
    parser.add_argument(
        "input_font", metavar="INPUT_FONT", help="Path to the input font"
    )
    parser.add_argument(
        "-o",
        "--output",
        dest="output_font",
        metavar="OUTPUT_FONT",
        help="Path to the output font.",
    )
    parser.add_argument(
        "-t",
        "--tables",
        metavar="TABLE_TAG",
        choices=Builder.supportedTables,
        nargs="+",
        help="Specify the table(s) to be built.",
    )
    parser.add_argument(
        "-d",
        "--debug",
        action="store_true",
        help="Add source-level debugging information to font.",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        help="increase the logger verbosity. Multiple -v " "options are allowed.",
        action="count",
        default=0,
    )
    parser.add_argument(
        "--traceback", help="show traceback for exceptions.", action="store_true"
    )
    options = parser.parse_args(args)

    # map -v / -vv onto logging levels, clamping at DEBUG
    levels = ["WARNING", "INFO", "DEBUG"]
    configLogger(level=levels[min(len(levels) - 1, options.verbose)])

    output_font = options.output_font or makeOutputFileName(options.input_font)
    # lazy %-style args instead of eager string formatting
    log.info("Compiling features to '%s'", output_font)

    font = TTFont(options.input_font)
    try:
        addOpenTypeFeatures(
            font, options.input_fea, tables=options.tables, debug=options.debug
        )
    except FeatureLibError as e:
        if options.traceback:
            raise
        log.error(e)
        # BUG FIX: previously execution fell through and the font was saved
        # (and the process exited 0) even though compilation failed.
        return 1
    font.save(output_font)
if __name__ == "__main__":
    # exit status comes from main(): None on success, non-zero on failure
    sys.exit(main())
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/feaLib/__main__.py",
"copies": "5",
"size": "2219",
"license": "apache-2.0",
"hash": 274769174998805150,
"line_mean": 27.8181818182,
"line_max": 82,
"alpha_frac": 0.6219017575,
"autogenerated": false,
"ratio": 3.9695885509838997,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.70914903084839,
"avg_score": null,
"num_lines": null
} |
from fontTools.ttLib import TTFont
import json
import os
import tempfile
from fontbakery.checkrunner import FAIL
from fontbakery.codetesting import (assert_PASS,
assert_results_contain,
CheckTester,
TEST_FILE)
from fontbakery.profiles import universal as universal_profile
def wrap_args(config, font):
    """Build the keyword-argument dict that fontbakery checks expect."""
    parsed = TTFont(font)
    args = {"config": config, "font": font, "fonts": [font]}
    args["ttFont"] = parsed
    args["ttFonts"] = [parsed]
    return args
def test_check_shaping_regression():
    """ Check that we can test shaping against expectations. """
    check = CheckTester(universal_profile,
                        "com.google.fonts/check/shaping/regression")

    shaping_test = {
        "configuration": {},
        "tests": [{"input": "AV",
                   "expectation": "A=0+664|V=1+691"}],
    }

    with tempfile.TemporaryDirectory() as tmp_gf_dir:
        # BUG FIX: use a context manager so the file handle is closed
        # (the old bare open() leaked it, which can break the temporary
        # directory cleanup, notably on Windows).
        with open(os.path.join(tmp_gf_dir, "test.json"), "w") as test_file:
            json.dump(shaping_test, test_file)
        config = {"com.google.fonts/check/shaping": {"test_directory": tmp_gf_dir}}

        font = TEST_FILE("nunito/Nunito-Regular.ttf")
        assert_PASS(check(wrap_args(config, font)),
                    "Nunito: A=664,V=691")

        font = TEST_FILE("slabo/Slabo13px.ttf")
        assert_results_contain(check(wrap_args(config, font)),
                               FAIL, "shaping-regression",
                               "Slabo: A!=664,V!=691")
def test_check_shaping_forbidden():
    """ Check that we can test for forbidden glyphs in output. """
    check = CheckTester(universal_profile,
                        "com.google.fonts/check/shaping/forbidden")

    shaping_test = {
        "configuration": {"forbidden_glyphs": [".notdef"]},
        "tests": [{"input": "日"}],
    }

    with tempfile.TemporaryDirectory() as tmp_gf_dir:
        # BUG FIX: close the file handle via a context manager instead of
        # leaking it with a bare open() inside json.dump().
        with open(os.path.join(tmp_gf_dir, "test.json"), "w") as test_file:
            json.dump(shaping_test, test_file)
        config = {"com.google.fonts/check/shaping": {"test_directory": tmp_gf_dir}}

        font = TEST_FILE("cjk/SourceHanSans-Regular.otf")
        assert_PASS(check(wrap_args(config, font)),
                    "Source Han contains CJK")

        font = TEST_FILE("slabo/Slabo13px.ttf")
        assert_results_contain(check(wrap_args(config, font)),
                               FAIL, "shaping-forbidden",
                               "Slabo shapes .notdef for CJK")
def test_check_shaping_collides():
    """ Check that we can test for colliding glyphs in output. """
    check = CheckTester(universal_profile,
                        "com.google.fonts/check/shaping/collides")

    shaping_test = {
        "configuration": {"collidoscope": {"area": 0,
                                           "marks": True}},
        "tests": [{"input": "ïï"}],
    }

    with tempfile.TemporaryDirectory() as tmp_gf_dir:
        # BUG FIX: close the file handle via a context manager instead of
        # leaking it with a bare open() inside json.dump().
        with open(os.path.join(tmp_gf_dir, "test.json"), "w") as test_file:
            json.dump(shaping_test, test_file)
        config = {"com.google.fonts/check/shaping": {"test_directory": tmp_gf_dir}}

        font = TEST_FILE("cousine/Cousine-Regular.ttf")
        assert_PASS(check(wrap_args(config, font)),
                    "ïï doesn't collide in Cousine")

        font = TEST_FILE("nunito/Nunito-Black.ttf")
        assert_results_contain(check(wrap_args(config, font)),
                               FAIL, "shaping-collides",
                               "ïï collides in Nunito")
| {
"repo_name": "googlefonts/fontbakery",
"path": "tests/profiles/shaping_test.py",
"copies": "1",
"size": "3511",
"license": "apache-2.0",
"hash": 8790068122700734000,
"line_mean": 34.3838383838,
"line_max": 83,
"alpha_frac": 0.5560947759,
"autogenerated": false,
"ratio": 3.6951476793248945,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47512424552248944,
"avg_score": null,
"num_lines": null
} |
from fontTools.ttLib import TTFont
# Mapping from the base font's glyph codes to the digits they render.
BASE_FONT = {
    'x': '.',
    'uniE2AE': '7',
    'uniE450': '5',
    'uniE7B8': '9',
    'uniE90F': '8',
    'uniEA64': '0',
    'uniF0A1': '1',
    'uniF442': '6',
    'uniF662': '2',
    'uniF69D': '4',
    'uniF8B2': '3',
}
def get_glyf_data(glyf):
    """Serialize a glyph's outline data as a hex string.

    Returns None for glyphs that carry no coordinate data (e.g. empty
    or composite glyphs without a 'coordinates' attribute).
    """
    if not hasattr(glyf, 'coordinates'):
        return None
    return glyf.coordinates.array.tobytes().hex()
def gen_glyf_num_dict(font):
    """Build a {glyph-data-hex: digit} mapping for *font*.

    Args:
        font: a font object whose glyph codes all appear in BASE_FONT.
    """
    mapping = {}
    glyfTable = font['glyf']
    for fcode in glyfTable.keys():
        # serialize the glyph shape so identical outlines compare equal
        outline = get_glyf_data(glyfTable[fcode])
        if outline is not None:
            mapping[outline] = BASE_FONT[fcode]
    return mapping
def gen_glyf_fcode_dict(font):
    """Build a {glyph-data-hex: font code} mapping for *font*.

    Args:
        font: a font object.
    """
    mapping = {}
    glyfTable = font['glyf']
    for fcode in glyfTable.keys():
        outline = get_glyf_data(glyfTable[fcode])
        if outline is not None:
            mapping[outline] = fcode
    return mapping
def gen_fcode_num_dict(base_font, new_font):
    """Map the scrambled codes of *new_font* to real digits via *base_font*.

    Both fonts contain the same glyph shapes; the base font's digits are
    known (BASE_FONT), so joining the two mappings on the shared shape
    decodes the new font's codes.

    Args:
        base_font: the reference font object whose codes are known.
        new_font: the freshly downloaded font object with scrambled codes.
    """
    shapeToNum = gen_glyf_num_dict(base_font)    # shape -> digit (base font)
    shapeToCode = gen_glyf_fcode_dict(new_font)  # shape -> code (new font)
    # join the mappings on the shared glyph shape
    return {fcode: shapeToNum[shape] for shape, fcode in shapeToCode.items()}
def convert_num_code_to_fcodes(num_code):
    """Translate a scraped string into the font's internal glyph codes."""
    fcodes = []
    for char in num_code:
        # the decimal point is encoded as the literal glyph name 'x'
        if char == '.':
            fcodes.append('x')
        else:
            # e.g. '\ue2ae' -> 'uniE2AE'
            fcodes.append('uni%X' % ord(char))
    return fcodes
def get_real_num(base_font_path, new_font_path, num_code):
    """Decode a scraped (obfuscated) number string into its real value.

    Args:
        base_font_path: path to the reference (base) font file.
        new_font_path: path to the newly downloaded font file.
        num_code: the raw string scraped from the page (prints as garbage).
    """
    baseFont = TTFont(base_font_path)
    newFont = TTFont(new_font_path)

    fcode_num_mapper = gen_fcode_num_dict(baseFont, newFont)
    fcodes = convert_num_code_to_fcodes(num_code)

    print('编码与数字的映射关系')
    for fcode, num in fcode_num_mapper.items():
        print('%10s -> %s' % (fcode, num))
    print('页面抓取的字符串编码: %s\n' % fcodes)

    return ''.join(fcode_num_mapper[k] for k in fcodes)
if __name__ == '__main__':
    # Save the downloaded font as new_font.woff each run; page_num is the
    # raw digit string scraped from the page.
    page_num = '.'
    num = get_real_num('base.woff', 'new_font.woff', page_num)
    print('票房:%s' % num)
| {
"repo_name": "seamile/WeedLab",
"path": "MaoYanSpider/parse.py",
"copies": "2",
"size": "3501",
"license": "mit",
"hash": -2348253958999298000,
"line_mean": 22.7338709677,
"line_max": 78,
"alpha_frac": 0.5518178729,
"autogenerated": false,
"ratio": 2.0739957716701904,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8621539348026706,
"avg_score": 0.000854859308696906,
"num_lines": 124
} |
from fontTools.ttLib.tables import DefaultTable
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
import struct
# sstruct format strings describing the binary layout of the VDMX table.
# NOTE: the text inside the triple-quoted strings is parsed by sstruct,
# so it is data, not commentary.
VDMX_HeaderFmt = """
> # big endian
version: H # Version number (0 or 1)
numRecs: H # Number of VDMX groups present
numRatios: H # Number of aspect ratio groupings
"""
# the VDMX header is followed by an array of RatRange[numRatios] (i.e. aspect
# ratio ranges);
VDMX_RatRangeFmt = """
> # big endian
bCharSet: B # Character set
xRatio: B # Value to use for x-Ratio
yStartRatio: B # Starting y-Ratio value
yEndRatio: B # Ending y-Ratio value
"""
# followed by an array of offset[numRatios] from start of VDMX table to the
# VDMX Group for this ratio range (offsets will be re-calculated on compile);
# followed by an array of Group[numRecs] records;
VDMX_GroupFmt = """
> # big endian
recs: H # Number of height records in this group
startsz: B # Starting yPelHeight
endsz: B # Ending yPelHeight
"""
# followed by an array of vTable[recs] records.
VDMX_vTableFmt = """
> # big endian
yPelHeight: H # yPelHeight to which values apply
yMax: h # Maximum value (in pels) for this yPelHeight
yMin: h # Minimum value (in pels) for this yPelHeight
"""
class table_V_D_M_X_(DefaultTable.DefaultTable):
    """Vertical Device Metrics ('VDMX') table.

    Maps aspect-ratio ranges to groups of per-yPelHeight (yMax, yMin)
    records. Internally each group is a dict keyed by yPelHeight.
    """

    def decompile(self, data, ttFont):
        pos = 0  # track current position from the start of VDMX table
        dummy, data = sstruct.unpack2(VDMX_HeaderFmt, data, self)
        pos += sstruct.calcsize(VDMX_HeaderFmt)
        self.ratRanges = []
        for i in range(self.numRatios):
            ratio, data = sstruct.unpack2(VDMX_RatRangeFmt, data)
            pos += sstruct.calcsize(VDMX_RatRangeFmt)
            # the mapping between a ratio and a group is defined further below
            ratio['groupIndex'] = None
            self.ratRanges.append(ratio)
        lenOffset = struct.calcsize('>H')
        _offsets = []  # temporarily store offsets to groups
        for i in range(self.numRatios):
            offset = struct.unpack('>H', data[0:lenOffset])[0]
            data = data[lenOffset:]
            pos += lenOffset
            _offsets.append(offset)
        self.groups = []
        for groupIndex in range(self.numRecs):
            # the offset to this group from beginning of the VDMX table
            currOffset = pos
            group, data = sstruct.unpack2(VDMX_GroupFmt, data)
            # the group length and bounding sizes are re-calculated on compile
            recs = group.pop('recs')
            startsz = group.pop('startsz')
            endsz = group.pop('endsz')
            pos += sstruct.calcsize(VDMX_GroupFmt)
            for j in range(recs):
                vTable, data = sstruct.unpack2(VDMX_vTableFmt, data)
                vTableLength = sstruct.calcsize(VDMX_vTableFmt)
                pos += vTableLength
                # group is a dict of (yMax, yMin) tuples keyed by yPelHeight
                group[vTable['yPelHeight']] = (vTable['yMax'], vTable['yMin'])
            # make sure startsz and endsz match the calculated values
            minSize = min(group.keys())
            maxSize = max(group.keys())
            # BUG FIX: the assertion messages previously read group.startsz /
            # group.endsz, but 'group' is a dict and those keys were popped
            # above, so a failing assertion raised AttributeError instead of
            # the intended message. Use the local variables instead.
            assert startsz == minSize, \
                "startsz (%s) must equal min yPelHeight (%s): group %d" % \
                (startsz, minSize, groupIndex)
            assert endsz == maxSize, \
                "endsz (%s) must equal max yPelHeight (%s): group %d" % \
                (endsz, maxSize, groupIndex)
            self.groups.append(group)
            # match the defined offsets with the current group's offset
            for offsetIndex, offsetValue in enumerate(_offsets):
                # when numRecs < numRatios there can be more than one ratio
                # range sharing the same VDMX group
                if currOffset == offsetValue:
                    # map the group with the ratio range that has the same
                    # index as the offset to that group
                    self.ratRanges[offsetIndex]['groupIndex'] = groupIndex
        # check that all ratio ranges have a group
        for i in range(self.numRatios):
            ratio = self.ratRanges[i]
            if ratio['groupIndex'] is None:
                from fontTools import ttLib
                raise ttLib.TTLibError(
                    "no group defined for ratRange %d" % i)

    def _getOffsets(self):
        """
        Calculate offsets to VDMX_Group records.
        For each ratRange return a list of offset values from the beginning of
        the VDMX table to a VDMX_Group.
        """
        lenHeader = sstruct.calcsize(VDMX_HeaderFmt)
        lenRatRange = sstruct.calcsize(VDMX_RatRangeFmt)
        lenOffset = struct.calcsize('>H')
        lenGroupHeader = sstruct.calcsize(VDMX_GroupFmt)
        lenVTable = sstruct.calcsize(VDMX_vTableFmt)
        # offset to the first group
        pos = lenHeader + self.numRatios*lenRatRange + self.numRatios*lenOffset
        groupOffsets = []
        for group in self.groups:
            groupOffsets.append(pos)
            lenGroup = lenGroupHeader + len(group) * lenVTable
            pos += lenGroup  # offset to next group
        offsets = []
        for ratio in self.ratRanges:
            groupIndex = ratio['groupIndex']
            offsets.append(groupOffsets[groupIndex])
        return offsets

    def compile(self, ttFont):
        if not(self.version == 0 or self.version == 1):
            from fontTools import ttLib
            raise ttLib.TTLibError(
                "unknown format for VDMX table: version %s" % self.version)
        data = sstruct.pack(VDMX_HeaderFmt, self)
        for ratio in self.ratRanges:
            data += sstruct.pack(VDMX_RatRangeFmt, ratio)
        # recalculate offsets to VDMX groups
        for offset in self._getOffsets():
            data += struct.pack('>H', offset)
        for group in self.groups:
            # group header values are re-derived from the group's records
            recs = len(group)
            startsz = min(group.keys())
            endsz = max(group.keys())
            gHeader = {'recs': recs, 'startsz': startsz, 'endsz': endsz}
            data += sstruct.pack(VDMX_GroupFmt, gHeader)
            for yPelHeight, (yMax, yMin) in group.items():
                vTable = {'yPelHeight': yPelHeight, 'yMax': yMax, 'yMin': yMin}
                data += sstruct.pack(VDMX_vTableFmt, vTable)
        return data

    def toXML(self, writer, ttFont):
        writer.simpletag("version", value=self.version)
        writer.newline()
        writer.begintag("ratRanges")
        writer.newline()
        for ratio in self.ratRanges:
            groupIndex = ratio['groupIndex']
            writer.simpletag(
                "ratRange",
                bCharSet=ratio['bCharSet'],
                xRatio=ratio['xRatio'],
                yStartRatio=ratio['yStartRatio'],
                yEndRatio=ratio['yEndRatio'],
                groupIndex=groupIndex
            )
            writer.newline()
        writer.endtag("ratRanges")
        writer.newline()
        writer.begintag("groups")
        writer.newline()
        for groupIndex in range(self.numRecs):
            group = self.groups[groupIndex]
            recs = len(group)
            startsz = min(group.keys())
            endsz = max(group.keys())
            writer.begintag("group", index=groupIndex)
            writer.newline()
            writer.comment("recs=%d, startsz=%d, endsz=%d" %
                           (recs, startsz, endsz))
            writer.newline()
            for yPelHeight in group.keys():
                yMax, yMin = group[yPelHeight]
                writer.simpletag(
                    "record", yPelHeight=yPelHeight, yMax=yMax, yMin=yMin)
                writer.newline()
            writer.endtag("group")
            writer.newline()
        writer.endtag("groups")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":
            self.version = safeEval(attrs["value"])
        elif name == "ratRanges":
            if not hasattr(self, "ratRanges"):
                self.ratRanges = []
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "ratRange":
                    # numRatios is recounted as ratRange elements arrive
                    if not hasattr(self, "numRatios"):
                        self.numRatios = 1
                    else:
                        self.numRatios += 1
                    ratio = {
                        "bCharSet": safeEval(attrs["bCharSet"]),
                        "xRatio": safeEval(attrs["xRatio"]),
                        "yStartRatio": safeEval(attrs["yStartRatio"]),
                        "yEndRatio": safeEval(attrs["yEndRatio"]),
                        "groupIndex": safeEval(attrs["groupIndex"])
                    }
                    self.ratRanges.append(ratio)
        elif name == "groups":
            if not hasattr(self, "groups"):
                self.groups = []
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "group":
                    # numRecs is recounted as group elements arrive
                    if not hasattr(self, "numRecs"):
                        self.numRecs = 1
                    else:
                        self.numRecs += 1
                    group = {}
                    for element in content:
                        if not isinstance(element, tuple):
                            continue
                        name, attrs, content = element
                        if name == "record":
                            yPelHeight = safeEval(attrs["yPelHeight"])
                            yMax = safeEval(attrs["yMax"])
                            yMin = safeEval(attrs["yMin"])
                            group[yPelHeight] = (yMax, yMin)
                    self.groups.append(group)
| {
"repo_name": "googlei18n/TachyFont",
"path": "run_time/src/gae_server/third_party/fonttools/Lib/fontTools/ttLib/tables/V_D_M_X_.py",
"copies": "4",
"size": "8193",
"license": "apache-2.0",
"hash": 260941985493819940,
"line_mean": 34.3146551724,
"line_max": 77,
"alpha_frac": 0.668253387,
"autogenerated": false,
"ratio": 3.0650953984287317,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.030251724527275495,
"num_lines": 232
} |
from fontTools.ttLib.tables import otTables as ot
from .table_builder import TableUnbuilder
def unbuildColrV1(layerV1List, baseGlyphV1List, ignoreVarIdx=False):
    """Unbuild a COLRv1 structure into plain-dict color glyph descriptions.

    Args:
        layerV1List: the COLR table's LayerV1List subtable.
        baseGlyphV1List: the COLR table's BaseGlyphV1List subtable.
        ignoreVarIdx: accepted for backward/CLI compatibility; currently
            unused by this unbuilder (variation indices are left as-is).

    Returns:
        Dict mapping each base glyph name to its unbuilt Paint dict.
    """
    # NOTE(review): the __main__ driver below passes ignoreVarIdx=...;
    # without this (defaulted) parameter that call raised a TypeError.
    unbuilder = LayerV1ListUnbuilder(layerV1List.Paint)
    return {
        rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint)
        for rec in baseGlyphV1List.BaseGlyphV1Record
    }
def _flatten(lst):
for el in lst:
if isinstance(el, list):
yield from _flatten(el)
else:
yield el
class LayerV1ListUnbuilder:
    """Converts Paint subtables back into plain dicts, re-inlining the
    children that PaintColrLayers stores in the shared layer list."""

    def __init__(self, layers):
        self.layers = layers
        # Only PaintColrLayers needs custom handling: its child paints live
        # in the shared LayerV1List and must be pulled back inline.
        callbacks = {
            (
                ot.Paint,
                ot.PaintFormat.PaintColrLayers,
            ): self._unbuildPaintColrLayers,
        }
        self.tableUnbuilder = TableUnbuilder(callbacks)

    def unbuildPaint(self, paint):
        """Convert a single ot.Paint subtable into a dict."""
        assert isinstance(paint, ot.Paint)
        return self.tableUnbuilder.unbuild(paint)

    def _unbuildPaintColrLayers(self, source):
        """Resolve a PaintColrLayers record to its inlined child layers."""
        assert source["Format"] == ot.PaintFormat.PaintColrLayers

        first = source["FirstLayerIndex"]
        children = self.layers[first : first + source["NumLayers"]]
        layers = list(_flatten([self.unbuildPaint(child) for child in children]))

        # A single child needs no wrapping layer list.
        if len(layers) == 1:
            return layers[0]

        return {"Format": source["Format"], "Layers": layers}
# Script entry point: dump a font's COLRv1 glyph graph as plain dicts.
if __name__ == "__main__":
    from pprint import pprint
    import sys
    from fontTools.ttLib import TTFont

    try:
        fontfile = sys.argv[1]
    except IndexError:
        sys.exit("usage: fonttools colorLib.unbuilder FONTFILE")
    font = TTFont(fontfile)
    colr = font["COLR"]
    if colr.version < 1:
        sys.exit(f"error: No COLR table version=1 found in {fontfile}")
    # NOTE(review): ignoreVarIdx is passed here but unbuildColrV1 as defined
    # above does not declare that parameter -- confirm the intended signature.
    colorGlyphs = unbuildColrV1(
        colr.table.LayerV1List,
        colr.table.BaseGlyphV1List,
        ignoreVarIdx=not colr.table.VarStore,
    )
    pprint(colorGlyphs)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/colorLib/unbuilder.py",
"copies": "3",
"size": "2074",
"license": "apache-2.0",
"hash": -2067562385833911600,
"line_mean": 25.253164557,
"line_max": 77,
"alpha_frac": 0.581485053,
"autogenerated": false,
"ratio": 3.8550185873605947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001622849724115547,
"num_lines": 79
} |
from fontTools.ttLib.ttFont import TTFont
from fontTools.ttLib.sfnt import readTTCHeader, writeTTCHeader
from io import BytesIO
import struct
import logging
log = logging.getLogger(__name__)
class TTCollection(object):

    """Object representing a TrueType Collection / OpenType Collection.

    The main API is self.fonts being a list of TTFont instances.

    If shareTables is True, then different fonts in the collection
    might point to the same table object if the data for the table was
    the same in the font file.  Note, however, that this might result
    in surprises and incorrect behavior if the different fonts involved
    have different GlyphOrder.  Use only if you know what you are doing.
    """

    def __init__(self, file=None, shareTables=False, **kwargs):
        """Load a collection from *file* (a pathname or a readable binary
        stream); with no *file*, create an empty collection.

        Extra keyword arguments are forwarded to each TTFont constructor.
        """
        fonts = self.fonts = []
        if file is None:
            return

        # fontNumber is chosen per-font below; callers may not pass it.
        assert 'fontNumber' not in kwargs, kwargs

        if not hasattr(file, "read"):
            # NOTE(review): the stream opened here is never closed by this
            # class; TTFont may read from it lazily, so closing eagerly
            # would be unsafe.  Callers can use close() / the context manager.
            file = open(file, "rb")

        # Shared cache lets identical table data be represented by a single
        # table object across fonts (see class docstring).
        tableCache = {} if shareTables else None

        header = readTTCHeader(file)
        for i in range(header.numFonts):
            font = TTFont(file, fontNumber=i, _tableCache=tableCache, **kwargs)
            fonts.append(font)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def close(self):
        """Close every font in the collection."""
        for font in self.fonts:
            font.close()

    def save(self, file, shareTables=True):
        """Save the font to disk. Similarly to the constructor,
        the 'file' argument can be either a pathname or a writable
        file object.
        """
        if not hasattr(file, "write"):
            final = None
            file = open(file, "wb")
        else:
            # assume "file" is a writable file object
            # write to a temporary stream to allow saving to unseekable streams
            final = file
            file = BytesIO()

        tableCache = {} if shareTables else None

        # Write the header with placeholder offsets, save each font while
        # recording where it landed, then patch the real offsets in.
        offsets_offset = writeTTCHeader(file, len(self.fonts))
        offsets = []
        for font in self.fonts:
            offsets.append(file.tell())
            font._save(file, tableCache=tableCache)
            # _save may leave the stream position inside the font data;
            # move back to the end before writing the next one.
            file.seek(0, 2)

        file.seek(offsets_offset)
        file.write(struct.pack(">%dL" % len(self.fonts), *offsets))

        if final:
            final.write(file.getvalue())
        file.close()

    def saveXML(self, fileOrPath, newlinestr=None, writeVersion=True, **kwargs):
        """Dump every font in the collection as TTX inside a single
        <ttCollection> element."""
        from fontTools.misc import xmlWriter
        writer = xmlWriter.XMLWriter(fileOrPath, newlinestr=newlinestr)

        if writeVersion:
            from fontTools import version
            # Only the major.minor part of the version is recorded.
            version = ".".join(version.split('.')[:2])
            writer.begintag("ttCollection", ttLibVersion=version)
        else:
            writer.begintag("ttCollection")
        writer.newline()
        writer.newline()

        for font in self.fonts:
            font._saveXML(writer, writeVersion=False, **kwargs)
            writer.newline()

        writer.endtag("ttCollection")
        writer.newline()
        writer.close()

    def __getitem__(self, item):
        return self.fonts[item]

    def __setitem__(self, item, value):
        self.fonts[item] = value

    def __delitem__(self, item):
        # Fixed: previously this *returned* self.fonts[item] instead of
        # deleting it, so `del collection[i]` silently did nothing.
        del self.fonts[item]

    def __len__(self):
        return len(self.fonts)

    def __iter__(self):
        return iter(self.fonts)
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/ttLib/ttCollection.py",
"copies": "5",
"size": "2960",
"license": "apache-2.0",
"hash": -3090669484107063000,
"line_mean": 24.5172413793,
"line_max": 77,
"alpha_frac": 0.7027027027,
"autogenerated": false,
"ratio": 3.274336283185841,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6477038985885841,
"avg_score": null,
"num_lines": null
} |
from fontTools.varLib.instancer import instantiateVariableFont, OverlapMode
from gftools.fix import update_nametable, fix_fs_selection, fix_mac_style
from gftools.utils import font_stylename, font_familyname
__all__ = ["gen_static_font"]
def gen_static_font(
    var_font, axes, family_name=None, style_name=None, keep_overlaps=False, dst=None
):
    """Generate a GF spec compliant static font from a variable font.

    Args:
        var_font: a variable TTFont instance
        axes: dictionary containing axis positions e.g {"wdth": 100, "wght": 400};
            axes not listed are pinned at their fvar default values
        family_name: font family name
        style_name: font style name
        keep_overlaps: If true, keep glyph overlaps
        dst: Optional. Path to output font

    Returns:
        A TTFont instance (also saved to *dst* if a path was provided)

    Raises:
        ValueError: if *var_font* has no fvar table (not a variable font)
    """
    if "fvar" not in var_font:
        raise ValueError("Font is not a variable font!")

    if not keep_overlaps:
        keep_overlaps = OverlapMode.REMOVE

    # Work on a copy so the caller's axes dict is not mutated; fill in the
    # fvar default position for every axis the caller did not pin.
    axes = dict(axes)
    for axis in var_font["fvar"].axes:
        axes.setdefault(axis.axisTag, axis.defaultValue)

    # Let the instancer rename styles only when the caller did not supply one.
    update_style_name = not style_name
    static_font = instantiateVariableFont(
        var_font, axes, overlap=keep_overlaps, updateFontNames=update_style_name
    )

    if not family_name:
        family_name = font_familyname(static_font)
    if not style_name:
        style_name = font_stylename(static_font)

    # We need to reupdate the name table using our own update function
    # since GF requires axis particles which are not wght or ital to
    # be appended to the family name. See func for more details.
    update_nametable(static_font, family_name, style_name)
    fix_fs_selection(static_font)
    fix_mac_style(static_font)
    static_font["OS/2"].usWidthClass = 5
    if dst:
        static_font.save(dst)
    return static_font
| {
"repo_name": "googlefonts/gftools",
"path": "Lib/gftools/instancer.py",
"copies": "1",
"size": "1997",
"license": "apache-2.0",
"hash": -5766310285091494000,
"line_mean": 35.9814814815,
"line_max": 84,
"alpha_frac": 0.6805207812,
"autogenerated": false,
"ratio": 3.6575091575091574,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48380299387091574,
"avg_score": null,
"num_lines": null
} |
from fontTools.voltLib.error import VoltLibError
class Lexer(object):
    """Hand-written scanner for VOLT source text.

    Iterating a Lexer yields (token_type, token_value, location) tuples;
    NEWLINE tokens are produced internally by next_() but filtered out
    before reaching callers.  *location* is (filename, line, column).
    """
    NUMBER = "NUMBER"
    STRING = "STRING"
    NAME = "NAME"
    NEWLINE = "NEWLINE"

    # Character classes driving the scanner.
    CHAR_WHITESPACE_ = " \t"
    CHAR_NEWLINE_ = "\r\n"
    CHAR_DIGIT_ = "0123456789"
    CHAR_UC_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    CHAR_LC_LETTER_ = "abcdefghijklmnopqrstuvwxyz"
    CHAR_UNDERSCORE_ = "_"
    CHAR_PERIOD_ = "."
    # Names start with a letter, period or underscore...
    CHAR_NAME_START_ = CHAR_UC_LETTER_ + CHAR_LC_LETTER_ + CHAR_PERIOD_ + \
        CHAR_UNDERSCORE_
    # ...and may additionally contain digits after the first character.
    CHAR_NAME_CONTINUATION_ = CHAR_NAME_START_ + CHAR_DIGIT_

    def __init__(self, text, filename):
        # filename is only used for error locations; falsy values are
        # reported as "<volt>" (see location_).
        self.filename_ = filename
        self.line_ = 1          # 1-based line number of the scan position
        self.pos_ = 0           # absolute character offset into text_
        self.line_start_ = 0    # offset where the current line begins
        self.text_ = text
        self.text_length_ = len(text)

    def __iter__(self):
        return self

    def next(self):  # Python 2
        return self.__next__()

    def __next__(self):  # Python 3
        # Skip NEWLINE tokens; callers only ever see NUMBER/STRING/NAME.
        while True:
            token_type, token, location = self.next_()
            if token_type not in {Lexer.NEWLINE}:
                return (token_type, token, location)

    def location_(self):
        # (filename, line, 1-based column) of the current scan position.
        column = self.pos_ - self.line_start_ + 1
        return (self.filename_ or "<volt>", self.line_, column)

    def next_(self):
        """Scan and return the next raw token, including NEWLINE tokens.

        Raises StopIteration at end of input and VoltLibError on an
        unterminated string or an unexpected character.
        """
        self.scan_over_(Lexer.CHAR_WHITESPACE_)
        location = self.location_()
        start = self.pos_
        text = self.text_
        limit = len(text)
        if start >= limit:
            raise StopIteration()
        cur_char = text[start]
        next_char = text[start + 1] if start + 1 < limit else None
        if cur_char == "\n":
            self.pos_ += 1
            self.line_ += 1
            self.line_start_ = self.pos_
            return (Lexer.NEWLINE, None, location)
        if cur_char == "\r":
            # "\r\n" counts as a single newline; lone "\r" also ends a line.
            self.pos_ += (2 if next_char == "\n" else 1)
            self.line_ += 1
            self.line_start_ = self.pos_
            return (Lexer.NEWLINE, None, location)
        if cur_char == '"':
            # String literal: runs to the closing quote on the same line;
            # a newline before the closing quote is an error.
            self.pos_ += 1
            self.scan_until_('"\r\n')
            if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"':
                self.pos_ += 1
                # Token value excludes the surrounding quotes.
                return (Lexer.STRING, text[start + 1:self.pos_ - 1], location)
            else:
                raise VoltLibError("Expected '\"' to terminate string",
                                   location)
        if cur_char in Lexer.CHAR_NAME_START_:
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
            token = text[start:self.pos_]
            return (Lexer.NAME, token, location)
        if cur_char in Lexer.CHAR_DIGIT_:
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.NUMBER, int(text[start:self.pos_], 10), location)
        if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_:
            # Negative number: consume the sign, then the digit run.
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.NUMBER, int(text[start:self.pos_], 10), location)
        raise VoltLibError("Unexpected character: '%s'" % cur_char,
                           location)

    def scan_over_(self, valid):
        # Advance pos_ past any run of characters contained in *valid*.
        p = self.pos_
        while p < self.text_length_ and self.text_[p] in valid:
            p += 1
        self.pos_ = p

    def scan_until_(self, stop_at):
        # Advance pos_ up to (not past) the first character in *stop_at*.
        p = self.pos_
        while p < self.text_length_ and self.text_[p] not in stop_at:
            p += 1
        self.pos_ = p
| {
"repo_name": "google/material-design-icons",
"path": "update/venv/lib/python3.9/site-packages/fontTools/voltLib/lexer.py",
"copies": "6",
"size": "3417",
"license": "apache-2.0",
"hash": -4056876380195267000,
"line_mean": 33.5151515152,
"line_max": 78,
"alpha_frac": 0.5229733685,
"autogenerated": false,
"ratio": 3.6545454545454548,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7177518823045456,
"avg_score": null,
"num_lines": null
} |
from fooapp import FooApp, start_app
from ncore.data import FileStore
from ncore.daemon import become_daemon
from netgrowl import GrowlRegistrationPacket, GrowlNotificationPacket, GROWL_UDP_PORT
from socket import socket, AF_INET, SOCK_DGRAM
import logging
class Growl(FooApp):
    """Forward incoming messages as Growl UDP notifications."""

    name = 'growl'
    config_opts = {
        'target': 'IP address to send growl notifications to',
    }

    def __init__(self, server=None):
        FooApp.__init__(self, server)
        self.data = FileStore('/tmp/apps/growl')
        self.sock = socket(AF_INET, SOCK_DGRAM)
        # Register the application and its single notification type with
        # the Growl endpoint before any notifications are sent.
        registration = GrowlRegistrationPacket(application='foobox', password='')
        registration.addNotification('Message', enabled=True)
        self.sock.sendto(registration.payload(),
                         (self.data['target'], GROWL_UDP_PORT))

    def send(self, msg):
        """Deliver *msg* as a Growl 'Message' notification."""
        packet = GrowlNotificationPacket(application='foobox',
                                         notification='Message',
                                         title=msg['src'],
                                         description=msg['text'])
        self.sock.sendto(packet.payload(),
                         (self.data['target'], GROWL_UDP_PORT))
# Script entry point: daemonize, then run the Growl notification app.
if __name__ == '__main__':
    become_daemon()
    start_app(Growl)
| {
"repo_name": "JeremyGrosser/foobox",
"path": "growl.py",
"copies": "1",
"size": "1098",
"license": "bsd-3-clause",
"hash": -466811408264345200,
"line_mean": 32.2727272727,
"line_max": 85,
"alpha_frac": 0.64571949,
"autogenerated": false,
"ratio": 3.576547231270358,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9668115948127337,
"avg_score": 0.01083015462860424,
"num_lines": 33
} |
from fooapp import FooApp, start_app
from time import sleep
from datetime import tzinfo
from datetime import datetime
from xml.etree import ElementTree
from ncore.data import FileStore
from ncore.daemon import become_daemon
from ncore.rest import request
import logging
import sys
try:
import json
except ImportError:
import simplejson as json
class UPS(FooApp):
    """Track UPS packages.

    Sending a tracking number toggles tracking of it; update() polls the
    UPS XML API and announces activity entries not seen before.
    """

    name = 'ups'
    config_opts = {}

    def __init__(self, user):
        FooApp.__init__(self, user)
        # Both stores are persisted as JSON strings; a missing or corrupt
        # entry falls back to an empty list.  (Was a bare `except:`, which
        # also swallowed SystemExit/KeyboardInterrupt.)
        try:
            self.numbers = json.loads(self.data['numbers'])
        except Exception:
            self.numbers = []
        try:
            self.cache = json.loads(self.data['cache'])
        except Exception:
            self.cache = []

    def send(self, msg):
        """Toggle tracking of the number given in msg['text'] and refresh."""
        if msg['text'] not in self.numbers:
            self.numbers.append(msg['text'])
            self.recv('Now tracking %s' % msg['text'])
        else:
            self.numbers.remove(msg['text'])
            self.recv('Removed %s' % msg['text'])
        self.data['numbers'] = json.dumps(self.numbers)
        self.update()

    def run(self):
        """Poll for shipment activity once an hour, forever."""
        while True:
            self.update()
            sleep(3600)

    def update(self):
        """Fetch activity for every tracked number and announce new events."""
        if not self.numbers:
            return
        for num in self.numbers:
            # UPS expects the AccessRequest and TrackRequest documents
            # concatenated in a single POST body.
            body = '''<?xml version="1.0"?>
<AccessRequest xml:lang='en-US'>
<AccessLicenseNumber>%s</AccessLicenseNumber>
<UserId>%s</UserId>
<Password>%s</Password>
</AccessRequest>
''' % (self.config['ups']['xmlkey'], self.config['ups']['username'], self.config['ups']['password'])
            body += '''<?xml version="1.0"?>
<TrackRequest xml:lang='en-US'>
<Request>
<TransactionReference>
<CustomerContext><Number>%s</Number></CustomerContext>
<XpciVersion>1.0</XpciVersion>
</TransactionReference>
<RequestAction>Track</RequestAction>
<RequestOption>activity</RequestOption>
</Request>
<TrackingNumber>%s</TrackingNumber>
</TrackRequest>''' % (num, num)
            body = body.encode('ascii')
            status, response = request('POST', 'https://wwwcie.ups.com/ups.app/xml/Track', body)
            tree = ElementTree.fromstring(response)
            # Renamed from `status`, which shadowed the HTTP status above.
            for activity in tree.findall('Shipment/Package/Activity'):
                city = activity.findtext('ActivityLocation/Address/City')
                state = activity.findtext('ActivityLocation/Address/StateProvinceCode')
                dt = '%s:%s' % (activity.findtext('Date'), activity.findtext('Time'))
                dt = datetime.strptime(dt, '%Y%m%d:%H%M%S')
                desc = activity.findtext('Status/StatusType/Description')
                msg = '%s %s: %s' % (dt.strftime('%a %d - %H:%M'), num, desc)
                if city and state:
                    msg = '%s (%s, %s)' % (msg, city, state)
                # Only announce activity entries we have not seen before.
                if msg not in self.cache:
                    self.recv(msg)
                    self.cache.append(msg)
            # Persist the seen-cache after processing each number.
            self.data['cache'] = json.dumps(self.cache)
# Script entry point: daemonize and run the UPS tracker for the given user.
if __name__ == '__main__':
    become_daemon()
    start_app(UPS, user=sys.argv[1])
| {
"repo_name": "JeremyGrosser/foobox",
"path": "ups.py",
"copies": "1",
"size": "3133",
"license": "bsd-3-clause",
"hash": 5562004739862869000,
"line_mean": 31.2989690722,
"line_max": 100,
"alpha_frac": 0.5636769869,
"autogenerated": false,
"ratio": 3.896766169154229,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4960443156054229,
"avg_score": null,
"num_lines": null
} |
from fooapp import FooApp, start_app
from time import sleep
from xmlrpclib import ServerProxy
from ncore.data import FileStore
from ncore.daemon import become_daemon
try:
import json
except ImportError:
import simplejson as json
class Hella(FooApp):
    """Control a hellanzb daemon over XML-RPC: enqueue newzbin IDs sent to
    the app, and relay unseen daemon log entries back to the user."""

    name = 'hella'
    config_opts = {
        'password': 'The password defined as Hellanzb.XMLRPC_PASSWORD in hellanzb.conf',
        'server': 'The IP address or hostname running hellanzb',
        'port': 'The port hellanzb is running on. The default is 8760',
    }

    def __init__(self, server=None):
        FooApp.__init__(self, server)
        # NOTE(review): connection settings are read from self.data *before*
        # it is re-bound to the FileStore below -- confirm whether the
        # credentials are meant to come from the FooApp-provided store or
        # from /tmp/apps/hella (cf. Growl, which re-binds first).
        self.hellaserver = ServerProxy('http://hellanzb:%s@%s:%s/' % (self.data['password'], self.data['server'], self.data['port']))
        self.data = FileStore('/tmp/apps/hella')
        # Seen-cache is persisted as a JSON string; missing or corrupt data
        # falls back to empty.  (Was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.)
        try:
            self.cache = json.loads(self.data['cache'])
        except Exception:
            self.cache = []

    def send(self, msg):
        """Enqueue the newzbin ID given in msg['text']."""
        self.hellaserver.enqueuenewzbin(int(msg['text']))
        return

    def run(self):
        """Poll hellanzb status every 10s and relay unseen log entries."""
        while True:
            status = self.hellaserver.status()
            for entry in status['log_entries']:
                # Only the entry values are announced; keys are unused.
                for value in entry.values():
                    if value not in self.cache:
                        self.recv('%s: %s' % (self.name, value))
                        self.cache.append(value)
            self.data['cache'] = json.dumps(self.cache)
            sleep(10)
# Script entry point: daemonize, then run the hellanzb controller app.
if __name__ == '__main__':
    become_daemon()
    start_app(Hella)
| {
"repo_name": "JeremyGrosser/foobox",
"path": "hella.py",
"copies": "1",
"size": "1293",
"license": "bsd-3-clause",
"hash": -8821460938804824000,
"line_mean": 27.7333333333,
"line_max": 127,
"alpha_frac": 0.6813611756,
"autogenerated": false,
"ratio": 3,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41813611755999996,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.