-
Notifications
You must be signed in to change notification settings - Fork 6
/
Copy pathtextProof.py
496 lines (387 loc) · 14.7 KB
/
textProof.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
# Copyright 2023 Adobe
# All Rights Reserved.
# NOTICE: Adobe permits you to use, modify, and distribute this file in
# accordance with the terms of the Adobe license agreement accompanying
# it.
'''
Creates example paragraphs corresponding to a given character set.
Default mode is creating single-page PDF with a random subset of the requested
charset, alternatively a full charset can be consumed systematically, to show
as many characters as possible.
Known bug:
line spacing may become inconsistent if a character set beyond the font’s
character support is requested (this is a macOS limitation caused by the
vertical metrics in a given fallback font).
Input: folder containing fonts, or single font file. Optionally, secondary
font(s) can be specified (for mixing Roman and Italic, for example).
'''
import re
import sys
import argparse
import itertools
import random
import subprocess
import textwrap
import drawBot as db
from fontTools.ttLib import TTFont
from pathlib import Path
from proofing_helpers import fontSorter
from proofing_helpers import charsets as cs
from proofing_helpers.globals import FONT_MONO, ADOBE_BLANK
from proofing_helpers.helpers import list_uni_names
from proofing_helpers.files import (
get_font_paths, chain_charset_texts, read_text_file, make_temp_font)
from proofing_helpers.stamps import timestamp
DOC_SIZE = 'Letter'
MARGIN = 12
class TextContainer:
    '''Lightweight holder for one chunk of proof text plus styling flags.'''

    def __init__(self, text, italic=False, paragraph=False):
        # Normalize surrounding whitespace once, at construction time.
        self.text = text.strip()
        # Whether to render this chunk with the secondary (italic) font.
        self.italic = italic
        # Whether this chunk should be followed by a paragraph break.
        self.paragraph = paragraph
class RawDescriptionAndDefaultsFormatter(
    argparse.ArgumentDefaultsHelpFormatter,
    argparse.RawDescriptionHelpFormatter
):
    '''
    Help formatter that keeps the raw description text while still
    appending argument defaults (https://stackoverflow.com/a/18462760).
    '''
def get_options():
    '''Parse and return the command-line options for this proofing tool.'''
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=RawDescriptionAndDefaultsFormatter
    )
    # Every public name in the charsets module is a valid charset choice.
    charset_choices = [
        name for name in dir(cs) if not name.startswith('_')]
    arg_parser.add_argument(
        'fonts', nargs='+', metavar='FONT',
        help='font file or folder')
    arg_parser.add_argument(
        '-c', '--charset', action='store',
        default='al3', choices=charset_choices,
        help='character set')
    arg_parser.add_argument(
        '--filter', action='store', metavar='ABC',
        help='required characters')
    arg_parser.add_argument(
        '--capitalize', action='store_true', default=False,
        help='capitalize output')
    arg_parser.add_argument(
        '-p', '--pt_size', action='store', default=10, type=int,
        help='point size for sample')
    arg_parser.add_argument(
        '-k', '--kerning_off', default=False, action='store_true',
        help='switch off kerning')
    arg_parser.add_argument(
        '-a', '--full', action='store_true',
        help='consume whole character set')
    arg_parser.add_argument(
        '-v', '--verbose', action='store_true',
        help='report information about the characters used')
    arg_parser.add_argument(
        '-s', '--secondary_fonts', nargs='+', metavar='FONT', default=[],
        help='secondary font file or folder')
    return arg_parser.parse_args()
def merge_chunks(chunks, chunk_length=5):
    '''
    Glue any chunk shorter than chunk_length onto the previously kept
    chunk, so tiny fragments (e.g. a lone period) never stand alone.
    The very first chunk is always kept as-is.
    '''
    merged = []
    for chunk in chunks:
        if merged and len(chunk) < chunk_length:
            merged[-1] += chunk
        else:
            merged.append(chunk)
    return merged
def consume_charset(content_list, charset):
    '''
    Pick a random paragraph containing a random character of the given
    charset.

    Returns a tuple of (chosen paragraph, characters of the charset not
    covered by that paragraph). Retries with a new random character when
    no paragraph contains the first pick.

    Rewritten iteratively: the original retried via recursion, which
    could raise RecursionError for charsets whose characters are rare in
    the source text. The caller (make_formatted_content) guarantees that
    at least one character of the charset occurs in content_list, so the
    loop terminates.
    '''
    while True:
        character = random.choice(list(charset))
        found_paragraphs = [p for p in content_list if character in p]
        if found_paragraphs:
            paragraph_pick = random.choice(found_paragraphs)
            remaining_charset = set(charset) - set(paragraph_pick)
            return paragraph_pick, remaining_charset
def message_with_charset(message, characters, wrap_length=70):
    '''
    Print a header (message plus character count), followed by the
    sorted, space-separated characters wrapped to wrap_length columns.
    '''
    sorted_chars = ' '.join(sorted(characters))
    wrapped = '\n'.join(textwrap.wrap(sorted_chars, wrap_length))
    print(f'{message} ({len(characters)}):\n{wrapped}\n')
def analyze_missing(content_pick, content_list, charset, cs_name=None):
    '''
    Report stats about the chosen character set, which characters were
    used in the sample, etc.

    Args:
        content_pick: list of text chunks actually used in the proof
        content_list: full pool of source paragraphs
        charset: the requested character set
        cs_name: charset name used in report labels. Defaults to the
            module-level charset_name global, which this function used
            to read implicitly (kept for backward compatibility).
    '''
    if cs_name is None:
        # Fall back to the global set in the __main__ block.
        cs_name = charset_name
    abc = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
    used_chars = set(''.join(content_pick))
    missing_abc = set(abc) - used_chars
    missing_charset = set(charset) - used_chars
    # Characters the source text could never have supplied at all.
    missing_cset_source = set(charset) - set(''.join(content_list))
    message_with_charset(cs_name.upper(), charset)
    if missing_abc:
        message_with_charset('missing alphabetic characters', missing_abc)
    if missing_charset:
        message_with_charset(
            f'missing {cs_name.upper()} characters in output text',
            missing_charset)
    if missing_cset_source:
        print(
            f'missing {cs_name.upper()} characters '
            f'in source text ({len(missing_cset_source)}):')
        list_uni_names(missing_cset_source)
def make_proof(content, fonts_pri, fonts_sec, args, output_name):
    '''
    Render proof pages for every primary font (or every primary/secondary
    font combination), save the PDF to the Desktop, and open it.
    '''
    db.newDrawing()
    fea_dict = {
        'liga': True,
        # 'onum': True,
        # 'pnum': True,
        # 'tnum': True,
    }
    if args.kerning_off:
        fea_dict['kern'] = False
    if fonts_sec:
        font_pairs = list(itertools.product(fonts_pri, fonts_sec))
        num_combinations = len(font_pairs)
        if num_combinations > 20:
            print(f'proofing {num_combinations} font combinations …')
        for font_pri, font_sec in font_pairs:
            fs = make_formatted_string(
                content, font_pri, font_sec, args.pt_size, fea_dict)
            make_page(fs, font_pri, font_sec, args)
    else:
        for font in fonts_pri:
            fs = make_formatted_string(
                content, font, None, args.pt_size, fea_dict)
            make_page(fs, font, None, args)
    # Bug fix: expand '~' before saving. Previously the un-expanded path
    # was handed to saveImage (creating a literal '~' directory relative
    # to the cwd) while only the 'open' call expanded it.
    pdf_path = Path(f'~/Desktop/{output_name}.pdf').expanduser()
    db.saveImage(pdf_path)
    db.endDrawing()
    print(f'saved to {pdf_path}')
    subprocess.call(['open', pdf_path])
def make_formatted_string(content, font_pri, font_sec, pt_size, fea_dict):
    '''
    Build a drawBot FormattedString from the content list, switching
    between primary and secondary font per text item.
    '''
    fs = db.FormattedString(
        fontSize=pt_size,
        fallbackFont=ADOBE_BLANK,
        openTypeFeatures=fea_dict,
    )
    for item in content:
        # Italic chunks use the secondary font when one was supplied;
        # everything else falls back to the primary font.
        # temp_fonts maps original font paths to their temp copies
        # (populated in the __main__ block).
        if item.italic and font_sec:
            chosen_font = temp_fonts[font_sec]
        else:
            chosen_font = temp_fonts[font_pri]
        fs.append(item.text, font=chosen_font)
        separator = '\n\n' if item.paragraph else ' '
        fs.append(separator)
    return fs
def make_page(fs, font_pri, font_sec, args):
    '''
    Lay out one proof page (text box plus footer) and, in --full mode,
    recurse until all overflowing text has been placed.

    Returns the remaining overflow as a FormattedString (empty when
    everything fit). Fix: the recursive call's result is now returned,
    so the function no longer returns None from the --full branch.
    '''
    db.newPage(DOC_SIZE)
    # We do not want any non-ABC characters (such as the hyphen)
    # in an ABC-only proof.
    db.hyphenation(charset_name != 'abc')
    footer_label = f'{timestamp(readable=True)} | {font_pri.name}'
    if font_sec:
        footer_label += f' + {font_sec.name}'
    footer_label += f' | {args.pt_size} pt'
    if args.kerning_off:
        footer_label += ' (no kerning)'
    fs_footer = db.FormattedString(
        footer_label,
        font=FONT_MONO,
        fontSize=6,
    )
    # Main text box, inset from the page edges.
    fs_overflow = db.textBox(
        fs, (
            6 * MARGIN, 5 * MARGIN,
            db.width() - 9 * MARGIN, db.height() - 7 * MARGIN
        ))
    db.textBox(fs_footer, (6 * MARGIN, 0, db.width(), 3 * MARGIN))
    if fs_overflow and args.full:
        return make_page(fs_overflow, font_pri, font_sec, args)
    return fs_overflow
def format_content(content_list, len_limit=None, capitalize=False):
    '''
    Split paragraphs into sentence-ish chunks and wrap each chunk in a
    TextContainer with randomized italic/paragraph flags. Stops early
    once len_limit characters (if given) have been accumulated.
    '''
    running_length = 0
    containers = []
    # Do not split when a digit or capital letter precedes the period
    # or colon (e.g. '3.5', 'U.S.').
    splitter = re.compile(r'((?<!\d|[A-Z])[\.:])')
    for paragraph in content_list:
        if capitalize:
            paragraph = paragraph.upper()
        for chunk in merge_chunks(splitter.split(paragraph)):
            running_length += len(chunk)
            container = TextContainer(chunk)
            # Long chunks occasionally end a paragraph; ~60% of all
            # chunks are flagged italic.
            if len(chunk) > 140 and random.random() > 0.75:
                container.paragraph = True
            if random.random() < 0.6:
                container.italic = True
            containers.append(container)
            if len_limit is not None and running_length >= len_limit:
                return containers
    return containers
def filter_paragraphs(content_list, req_chars):
    '''
    Make sure required characters show up in the proof: insert a
    paragraph containing all of req_chars at the front of the list, or
    — failing that — one paragraph per individual character. Returns
    the (mutated) content list.
    '''
    full_matches = [
        p for p in content_list if set(req_chars) <= set(p)
    ]
    if full_matches:
        # At least one paragraph covers every required character.
        pick = random.choice(full_matches)
        content_list.insert(0, pick)
        print(f'matching paragraph ({req_chars} -- {len(full_matches)} found):')
        print('\n'.join(textwrap.wrap(pick, 70)))
        print()
        return content_list
    # Fall back to per-character matches, inserted near the front.
    for c_index, char in enumerate(req_chars):
        single_matches = [p for p in content_list if char in p]
        if not single_matches:
            # Nothing has been found for that character.
            print(f'no paragraph found for ({char})')
            continue
        pick = random.choice(single_matches)
        content_list.insert(c_index, f'[{char}] {pick}')
        print(f'matching paragraph ({char}):')
        print('\n'.join(textwrap.wrap(pick, 70)))
        print()
    return content_list
def get_content_list(charset_name):
    '''
    Chain external text files based on a given (validated) charset name,
    split the text into a list of lines, shuffle, and return it.
    '''
    # Charset names like 'al3' encode a prefix plus a numeric level.
    level_match = re.match(r'..(\d)', charset_name)
    if level_match:
        charset_prefix = charset_name[:2]
        max_charset_level = int(level_match.group(1))
        raw_content = chain_charset_texts(charset_prefix, max_charset_level)
    else:
        # e.g. the 'abc' charset, which has no level suffix.
        raw_content = read_text_file(f'_content/{charset_name.upper()}.txt')
    lines = raw_content.split('\n')
    random.shuffle(lines)
    return lines
def validate_charset(charset_name):
    '''
    Look up the named character set in the charsets module and return it.
    Exits with a message if the name is not defined.

    Fix: the original used eval() on the (user-supplied) name and caught
    NameError — but eval('cs.xyz') raises AttributeError for an unknown
    attribute, so the guard never fired. getattr is both safe and
    raises the exception we actually need to catch.
    '''
    try:
        target_charset = getattr(cs, charset_name.lower())
    except AttributeError:
        sys.exit(f'Character set "{charset_name}" is not defined')
    return target_charset
def get_glyphs_per_page(font, pt_size):
    '''
    Estimate how many glyphs of the given font fit on a Letter page at
    the given point size, based on the font's OS/2 average glyph width.
    '''
    ttfont = TTFont(font)
    avg_glyph_width = ttfont['OS/2'].xAvgCharWidth
    upm = ttfont['head'].unitsPerEm
    # A Letter page is 8.5 by 11 inches; 1 inch is 72 dtp points.
    # Page height divided by (point size * 1.2, the typical leading
    # factor) gives the number of lines per page; page width divided by
    # the scaled average glyph width gives glyphs per line.
    lines_per_page = (11 * 72) / (pt_size * 1.2)
    glyphs_per_line = (8.5 * 72) / (avg_glyph_width / upm * pt_size)
    return int(round(lines_per_page * glyphs_per_line))
def make_output_name(fonts_pri, fonts_sec, cs_name, pt, full):
    '''
    Make an output filename based on the input fonts.
    Not completely exhaustive. There could be a lot of combinations, so
    this is erring on simplicity rather than overkill.
    '''
    # Always include the primary font- or folder name.
    name_parts = ['text proof', Path(fonts_pri[0]).stem]
    if fonts_sec:
        # Include the secondary font- or folder name, if it exists;
        # further fonts or folders are ignored.
        path_sec = Path(fonts_sec[0])
        if path_sec.is_file():
            name_parts.append('vs')
        name_parts.append(path_sec.stem)
    name_parts.append(f'{cs_name} {pt}pt')
    if full:
        name_parts.append('full')
    return ' '.join(name_parts)
def make_formatted_content(
    content_list, charset,
    len_limit=None, char_filter=None, capitalize=False, full=False
):
    '''
    Choose and format the proof text.

    In full mode, keep collecting paragraphs until the whole charset is
    consumed (minus characters the source text cannot supply at all).
    Otherwise, format the shuffled paragraph list up to len_limit
    characters, optionally front-loading paragraphs that contain the
    char_filter characters.
    '''
    if full:
        # Some characters are hard to find, so the source text might not
        # contain all of the characters for the given charset.
        acceptable_omissions = len(
            set(charset) - set(''.join(content_list)))
        full_content = []
        remaining_charset = charset
        while len(remaining_charset) > acceptable_omissions:
            paragraph, remaining_charset = consume_charset(
                content_list, remaining_charset)
            full_content.append(paragraph)
        # Bug fix: capitalize used to be passed positionally, where it
        # was interpreted as len_limit (a bool compares as 0/1), which
        # truncated the full-mode proof to a single chunk.
        formatted_content = format_content(
            full_content, capitalize=capitalize)
    else:
        if char_filter:
            content_list = filter_paragraphs(content_list, char_filter)
        formatted_content = format_content(content_list, len_limit, capitalize)
    return formatted_content
def get_fonts(input_paths):
    '''
    Collect font paths from the given files/folders and return them
    sorted, with italics alternating with their roman counterparts.
    '''
    fonts = []
    # Idiom fix: the original used enumerate() but never used the index.
    for path_name in input_paths:
        fonts.extend(get_font_paths(Path(path_name)))
    return fontSorter.sort_fonts(fonts, alternate_italics=True)
if __name__ == '__main__':
    args = get_options()
    # charset_name is a module-level global; make_page() and
    # analyze_missing() read it directly.
    charset_name = args.charset
    # validate_charset() exits with a message for unknown charset names.
    charset = validate_charset(charset_name)
    # Shuffled pool of source paragraphs for the chosen charset.
    content_list = get_content_list(charset_name)
    fonts_pri = get_fonts(args.fonts)
    fonts_sec = get_fonts(args.secondary_fonts)
    gpp_count = 0
    # Module-level global: maps each font path to its temporary copy;
    # consumed by make_formatted_string().
    temp_fonts = {}
    for i, font in enumerate(fonts_pri + fonts_sec):
        # Make temporary fonts, and calculate how many glyphs of the given
        # font may fit on a page
        temp_fonts[font] = make_temp_font(i, font)
        gpp_count += get_glyphs_per_page(font, args.pt_size)
    # This is not completely representative of the # of glyphs/page,
    # but it is a useful approximation.
    len_limit = gpp_count / (len(fonts_pri) + len(fonts_sec))
    formatted_content = make_formatted_content(
        content_list, charset,
        len_limit, args.filter, args.capitalize, args.full)
    output_name = make_output_name(
        args.fonts, args.secondary_fonts,
        charset_name, args.pt_size, args.full)
    make_proof(formatted_content, fonts_pri, fonts_sec, args, output_name)
    if args.verbose:
        # Report which charset characters never made it into the output.
        content_pick = [fc.text for fc in formatted_content]
        analyze_missing(content_pick, content_list, charset)