import shutil

# Assumed helper imports: get_temp_file_path() and run_shell_command()
# come from afdko's fdkutils module.
def generalizeCFF(otfPath, do_sfnt=True):
    """
    Adapted from similar routine in 'buildmasterotfs'. This uses
    temp files for both tx output and sfntedit output instead of
    overwriting 'otfPath', and also provides an option to skip
    the sfntedit step (so 'otfPath' can be either a .otf file or
    a .cff file).
    """
    tmp_tx_path = get_temp_file_path()
    out_path = get_temp_file_path()
    shutil.copyfile(otfPath, out_path, follow_symlinks=True)
    if not run_shell_command(['tx', '-cff', '+b', '-std', '-no_opt',
                              otfPath, tmp_tx_path]):
        raise Exception
    if do_sfnt:
        if not run_shell_command(['sfntedit', '-a',
                                  f'CFF ={tmp_tx_path}', out_path]):
            raise Exception
        return out_path
    else:
        return tmp_tx_path
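A minimal usage sketch for the routine above, assuming the fdkutils helpers are importable and the 'tx' and 'sfntedit' tools are on the PATH; the font paths below are placeholders:

# Both calls return temp-file paths; the input file is never overwritten.
generalized_otf = generalizeCFF("MyFont.otf")            # OTF copy with a rebuilt CFF table
bare_cff = generalizeCFF("MyFont.cff", do_sfnt=False)    # generalized CFF only, sfntedit step skipped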
bp.write(bezString)
if options.doAlign:
    doAlign = "-ra"
else:
    doAlign = "-rs"
if options.allStems:
    allStems = "-a"
else:
    allStems = ""
# Build the autohintexe command line from the options chosen above and run it.
command = AUTOHINTEXE + ' -q %s %s -f "%s" "%s" 2>&1' % (
    doAlign, allStems, tempFI, tempBez)
if options.debug:
    print(command)
log = fdkutils.runShellCmd(command)
if log:
    print(log)
    if "number terminator while" in log:
        print(tempBez)
        sys.exit()
if os.path.exists(tempReport):
    with open(tempReport, "r", encoding='utf-8') as bp:
        report = bp.read()
    if options.debug:
        print("Wrote AC fontinfo data file to", tempFI)
        print("Wrote AC output rpt file to", tempReport)
    report = report.strip()  # keep the stripped result; str.strip() is not in-place
    if report:
        glyphReports.addGlyphReport(report)
if options.debug:
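For illustration, with doAlign set to "-ra" and allStems to "-a", the format string above yields a command along these lines (assuming AUTOHINTEXE points at the autohintexe binary; the temp paths are placeholders):

# autohintexe -q -ra -a -f "/tmp/fontinfo_XXXX" "/tmp/glyph_XXXX.bez" 2>&1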
def getOptions():
    options = ACOptions()
    i = 1
    numOptions = len(sys.argv)
    while i < numOptions:
        arg = sys.argv[i]
        if options.fileList and arg[0] == "-":
            raise ACOptionParseError(
                "Option Error: All file names must follow all other "
                "options <%s>." % arg)
        if arg == "-h":
            logMsg(__help__)
            sys.exit(0)
        elif arg == "-u":
            logMsg(__usage__)
            sys.exit(0)
        elif arg == "-all":
            options.allStems = 1
        elif arg == "-a":
            options.doAlign = 1
        elif arg == "-d":
            options.debug = 1
        elif arg == "-q":
            options.verbose = 0
        elif arg == "-o":
            i = i + 1
            options.reportPath = sys.argv[i]
        elif arg == "-new":
            options.new = 1
        elif arg in ["-xg", "-g"]:
            if arg == "-xg":
def getSourceGOADBData(inputFilePath):
    # First, get the Unicode mapping from the TTF cmap table.
    success, output = fdkutils.get_shell_command_output([
        'spot', '-t', 'cmap=7', inputFilePath])
    if not success:
        raise MakeOTFShellError
    spotGlyphList = re.findall(r"[\n\t]\[(....+)\]=<([^>]+)>", output)
    # Because this dumps all the Unicode map tables, there are a number
    # of duplicates; weed them out, and strip out gid part of spot name
    gDict = {}
    for entry in sorted(set(spotGlyphList)):
        uni = entry[0]
        gname, gid_str = entry[1].split('@')
        gid = int(gid_str)
        if gid in gDict:
print("makeotf [Warning] Source TTF font contains multiple "
"Unicode values for glyph '%s'. Only the first ('%s') "
gname, gid_str = entry[1].split('@')
gid = int(gid_str)
if gid in gDict:
print("makeotf [Warning] Source TTF font contains multiple "
"Unicode values for glyph '%s'. Only the first ('%s') "
"will be used. Additional Unicode value: %s" %
(gname, gDict[gid], uni))
        else:
            gDict[gid] = uni
    # Now get the font glyph name list, so as to get the glyphs with
    # no unicode mapping. We'll also use this to set the glyph order.
    # I use tx so as to get the same names as tx for the TTF glyphs;
    # this can differ from spot. I don't use tx for Unicode values,
    # as tx doesn't check 32 bit UV's, and doesn't report double-encodings.
    success, output = fdkutils.get_shell_command_output([
        'tx', '-mtx', inputFilePath])
    if not success:
        raise MakeOTFShellError
    txGlyphList = re.findall(r"[\n\r]glyph\[(\d+)\]\s+{([^,]+)", output)
    gnameDict = {}
    for gid_str, gname in txGlyphList:
        gid = int(gid_str)
        gnameDict[gid] = gname
        if gid not in gDict:
            gDict[gid] = None
    # Now flatten this to a GOADB list.
    goadbList = []
    for gid, uni_val in sorted(gDict.items()):
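For orientation, here is a small self-contained check of what those two regexes capture, using made-up output fragments shaped the way the code expects (real spot/tx output carries more fields):

import re

spot_sample = "\n[0041]=<A@36>"             # hypothetical 'spot -t cmap=7' line
tx_sample = "\nglyph[36] {A,0,500,..."      # hypothetical 'tx -mtx' line

print(re.findall(r"[\n\t]\[(....+)\]=<([^>]+)>", spot_sample))    # [('0041', 'A@36')]
print(re.findall(r"[\n\r]glyph\[(\d+)\]\s+{([^,]+)", tx_sample))  # [('36', 'A')]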
def save(self, file):
    # avoid crashes if they wrote nothing in the page
    if self.data is None:
        self.data = TestStream
    if self.compression == 1:
        comp = zlib.compress(self.data)             # this bit is very fast...
        base85 = pdfutils._AsciiBase85Encode(comp)  # ...sadly this isn't
        data_to_write = pdfutils._wrap(base85)
    else:
        data_to_write = self.data
    # the PDF length key should contain the length including
    # any extra LF pairs added by Print on DOS.
    # lines = len(string.split(self.data, '\n'))
    # length = len(self.data) + lines  # one extra LF each
    length = len(data_to_write) + len(LINEEND)  # AR 19980202
    if self.fontType is None:
        fontStreamEntry = ""
    else:
        fontStreamEntry = "/Subtype %s" % (self.fontType)
    if self.compression:
        file.write(('<< %s /Length %d /Filter [/ASCII85Decode /FlateDecode] >>'
                    % (fontStreamEntry, length) + LINEEND).encode('utf-8'))
    else:
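As a concrete illustration of the compressed branch above, formatting the same template with hypothetical values (a '/Type1C' subtype and a 1234-byte payload) produces a stream dictionary like this:

fontStreamEntry = "/Subtype %s" % "/Type1C"   # hypothetical fontType
print('<< %s /Length %d /Filter [/ASCII85Decode /FlateDecode] >>'
      % (fontStreamEntry, 1234))
# -> << /Subtype /Type1C /Length 1234 /Filter [/ASCII85Decode /FlateDecode] >>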