comparison unicode-histogram.py @ 48:99bfff1538c6

as downloaded, python2.7...
author Henry S. Thompson <ht@inf.ed.ac.uk>
date Sun, 31 Jul 2022 19:01:54 +0100
parents
children 91d71e9760e8
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# from https://github.com/usc-isi-i2/dig-unicode/blob/master/python/unicode-histogram.py
import sys
import re
try:
    import simplejson as json
except ImportError:
    import json

from collections import Counter
import unicodedata

from time import strftime, gmtime

"""
12 December 2014
for each of {body, title}:
the unicodeSignature is the sequence of >ascii codepoints, in order, space-separated
the unicodeCatalog is the bag of >ascii codepoints, sorted/agglomerated using space, comma-separated
the unicodeHistogram is a json-encoded python dict/json object mapping codepoint to count

the unicodeBlockSignature is the sequence of block descriptors (of all >ascii), in order, space-separated
the unicodeBlockCatalog is the bag of block descriptors, sorted/agglomerated using space, comma-separated
the unicodeBlockHistogram is a json-encoded python dict/json object mapping block descriptor to count

the unicodeCategorySignature is the sequence of category descriptors (of all >ascii), in order, space-separated
the unicodeCategoryCatalog is the bag of category descriptors, sorted/agglomerated using space, comma-separated
the unicodeCategoryHistogram is a json-encoded python dict/json object mapping category descriptor to count

where block and category descriptors are defined via
# From http://stackoverflow.com/a/245072
# retrieved from http://unicode.org/Public/UNIDATA/Blocks.txt
# Blocks-5.1.0.txt
# Date: 2008-03-20, 17:41:00 PDT [KW]
and are formatted using _ rather than ,/space/-
"""

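# Illustrative sketch of the codepoint-level fields, using a hypothetical input
# (values below are an editor's worked example, not output shipped with the file):
# for a part whose text is u"\u2665\u2665\u00e9" (two hearts and an e-acute),
#   unicodeSignature -> u"\u2665 \u2665 \u00e9"        (document order, space-separated)
#   unicodeCatalog   -> u"\u00e9, \u2665 \u2665"       (sorted; repeats joined by spaces, groups by commas)
#   unicodeHistogram -> '{"\u2665": 2, "\u00e9": 1}'   (JSON-encoded counts; key order may vary)
# The *Block* and *Category* variants are built the same way from block and
# category descriptors instead of raw codepoints.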
def isAscii(c):
    try:
        return ord(c) <= 127
    except:
        return False

def gentime():
    return strftime("%Y-%m-%d %H:%M:%S", gmtime())

def fmtCodepoint(codepoint, style):
    return codepoint

def fmtMetadatum(metadatum, style):
    def fmtValue(s):
        return re.sub("[ -]", "_", re.sub(",", "", unicode(s)))

    if style=="category":
        category = categoryCodeDescription(unicodedata.category(metadatum))
        # return "category:" + fmtValue(category)
        return fmtValue(category)
    elif style=="block":
        # return "block:" + fmtValue(block(metadatum))
        return fmtValue(block(metadatum))
    else:
        return None

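# Worked example (hypothetical, for illustration): fmtMetadatum(u'\u00e9', 'category')
# returns 'Letter_Lowercase' and fmtMetadatum(u'\u00e9', 'block') returns
# 'Latin_1_Supplement', since fmtValue drops commas and maps spaces and hyphens to '_'.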
# From http://stackoverflow.com/a/245072

_blocks = []

def _initBlocks(text):
    pattern = re.compile(r'([0-9A-F]+)\.\.([0-9A-F]+);\ (\S.*\S)')
    for line in text.splitlines():
        m = pattern.match(line)
        if m:
            start, end, name = m.groups()
            _blocks.append((int(start, 16), int(end, 16), name))

# retrieved from http://unicode.org/Public/UNIDATA/Blocks.txt
_initBlocks('''
# Blocks-5.1.0.txt
# Date: 2008-03-20, 17:41:00 PDT [KW]
#
# Unicode Character Database
# Copyright (c) 1991-2008 Unicode, Inc.
# For terms of use, see http://www.unicode.org/terms_of_use.html
# For documentation, see UCD.html
#
# Note: The casing of block names is not normative.
# For example, "Basic Latin" and "BASIC LATIN" are equivalent.
#
# Format:
# Start Code..End Code; Block Name

# ================================================

# Note: When comparing block names, casing, whitespace, hyphens,
# and underbars are ignored.
# For example, "Latin Extended-A" and "latin extended a" are equivalent.
# For more information on the comparison of property values,
# see UCD.html.
#
# All code points not explicitly listed for Block
# have the value No_Block.

# Property: Block
#
# @missing: 0000..10FFFF; No_Block

0000..007F; Basic Latin
0080..00FF; Latin-1 Supplement
0100..017F; Latin Extended-A
0180..024F; Latin Extended-B
0250..02AF; IPA Extensions
02B0..02FF; Spacing Modifier Letters
0300..036F; Combining Diacritical Marks
0370..03FF; Greek and Coptic
0400..04FF; Cyrillic
0500..052F; Cyrillic Supplement
0530..058F; Armenian
0590..05FF; Hebrew
0600..06FF; Arabic
0700..074F; Syriac
0750..077F; Arabic Supplement
0780..07BF; Thaana
07C0..07FF; NKo
0900..097F; Devanagari
0980..09FF; Bengali
0A00..0A7F; Gurmukhi
0A80..0AFF; Gujarati
0B00..0B7F; Oriya
0B80..0BFF; Tamil
0C00..0C7F; Telugu
0C80..0CFF; Kannada
0D00..0D7F; Malayalam
0D80..0DFF; Sinhala
0E00..0E7F; Thai
0E80..0EFF; Lao
0F00..0FFF; Tibetan
1000..109F; Myanmar
10A0..10FF; Georgian
1100..11FF; Hangul Jamo
1200..137F; Ethiopic
1380..139F; Ethiopic Supplement
13A0..13FF; Cherokee
1400..167F; Unified Canadian Aboriginal Syllabics
1680..169F; Ogham
16A0..16FF; Runic
1700..171F; Tagalog
1720..173F; Hanunoo
1740..175F; Buhid
1760..177F; Tagbanwa
1780..17FF; Khmer
1800..18AF; Mongolian
1900..194F; Limbu
1950..197F; Tai Le
1980..19DF; New Tai Lue
19E0..19FF; Khmer Symbols
1A00..1A1F; Buginese
1B00..1B7F; Balinese
1B80..1BBF; Sundanese
1C00..1C4F; Lepcha
1C50..1C7F; Ol Chiki
1D00..1D7F; Phonetic Extensions
1D80..1DBF; Phonetic Extensions Supplement
1DC0..1DFF; Combining Diacritical Marks Supplement
1E00..1EFF; Latin Extended Additional
1F00..1FFF; Greek Extended
2000..206F; General Punctuation
2070..209F; Superscripts and Subscripts
20A0..20CF; Currency Symbols
20D0..20FF; Combining Diacritical Marks for Symbols
2100..214F; Letterlike Symbols
2150..218F; Number Forms
2190..21FF; Arrows
2200..22FF; Mathematical Operators
2300..23FF; Miscellaneous Technical
2400..243F; Control Pictures
2440..245F; Optical Character Recognition
2460..24FF; Enclosed Alphanumerics
2500..257F; Box Drawing
2580..259F; Block Elements
25A0..25FF; Geometric Shapes
2600..26FF; Miscellaneous Symbols
2700..27BF; Dingbats
27C0..27EF; Miscellaneous Mathematical Symbols-A
27F0..27FF; Supplemental Arrows-A
2800..28FF; Braille Patterns
2900..297F; Supplemental Arrows-B
2980..29FF; Miscellaneous Mathematical Symbols-B
2A00..2AFF; Supplemental Mathematical Operators
2B00..2BFF; Miscellaneous Symbols and Arrows
2C00..2C5F; Glagolitic
2C60..2C7F; Latin Extended-C
2C80..2CFF; Coptic
2D00..2D2F; Georgian Supplement
2D30..2D7F; Tifinagh
2D80..2DDF; Ethiopic Extended
2DE0..2DFF; Cyrillic Extended-A
2E00..2E7F; Supplemental Punctuation
2E80..2EFF; CJK Radicals Supplement
2F00..2FDF; Kangxi Radicals
2FF0..2FFF; Ideographic Description Characters
3000..303F; CJK Symbols and Punctuation
3040..309F; Hiragana
30A0..30FF; Katakana
3100..312F; Bopomofo
3130..318F; Hangul Compatibility Jamo
3190..319F; Kanbun
31A0..31BF; Bopomofo Extended
31C0..31EF; CJK Strokes
31F0..31FF; Katakana Phonetic Extensions
3200..32FF; Enclosed CJK Letters and Months
3300..33FF; CJK Compatibility
3400..4DBF; CJK Unified Ideographs Extension A
4DC0..4DFF; Yijing Hexagram Symbols
4E00..9FFF; CJK Unified Ideographs
A000..A48F; Yi Syllables
A490..A4CF; Yi Radicals
A500..A63F; Vai
A640..A69F; Cyrillic Extended-B
A700..A71F; Modifier Tone Letters
A720..A7FF; Latin Extended-D
A800..A82F; Syloti Nagri
A840..A87F; Phags-pa
A880..A8DF; Saurashtra
A900..A92F; Kayah Li
A930..A95F; Rejang
AA00..AA5F; Cham
AC00..D7AF; Hangul Syllables
D800..DB7F; High Surrogates
DB80..DBFF; High Private Use Surrogates
DC00..DFFF; Low Surrogates
E000..F8FF; Private Use Area
F900..FAFF; CJK Compatibility Ideographs
FB00..FB4F; Alphabetic Presentation Forms
FB50..FDFF; Arabic Presentation Forms-A
FE00..FE0F; Variation Selectors
FE10..FE1F; Vertical Forms
FE20..FE2F; Combining Half Marks
FE30..FE4F; CJK Compatibility Forms
FE50..FE6F; Small Form Variants
FE70..FEFF; Arabic Presentation Forms-B
FF00..FFEF; Halfwidth and Fullwidth Forms
FFF0..FFFF; Specials
10000..1007F; Linear B Syllabary
10080..100FF; Linear B Ideograms
10100..1013F; Aegean Numbers
10140..1018F; Ancient Greek Numbers
10190..101CF; Ancient Symbols
101D0..101FF; Phaistos Disc
10280..1029F; Lycian
102A0..102DF; Carian
10300..1032F; Old Italic
10330..1034F; Gothic
10380..1039F; Ugaritic
103A0..103DF; Old Persian
10400..1044F; Deseret
10450..1047F; Shavian
10480..104AF; Osmanya
10800..1083F; Cypriot Syllabary
10900..1091F; Phoenician
10920..1093F; Lydian
10A00..10A5F; Kharoshthi
12000..123FF; Cuneiform
12400..1247F; Cuneiform Numbers and Punctuation
1D000..1D0FF; Byzantine Musical Symbols
1D100..1D1FF; Musical Symbols
1D200..1D24F; Ancient Greek Musical Notation
1D300..1D35F; Tai Xuan Jing Symbols
1D360..1D37F; Counting Rod Numerals
1D400..1D7FF; Mathematical Alphanumeric Symbols
1F000..1F02F; Mahjong Tiles
1F030..1F09F; Domino Tiles
20000..2A6DF; CJK Unified Ideographs Extension B
2F800..2FA1F; CJK Compatibility Ideographs Supplement
E0000..E007F; Tags
E0100..E01EF; Variation Selectors Supplement
F0000..FFFFF; Supplementary Private Use Area-A
100000..10FFFF; Supplementary Private Use Area-B

# EOF
''')

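# After this call _blocks holds (start, end, name) triples such as
# (0x0000, 0x007F, 'Basic Latin'), which block() below scans linearly.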
def block(ch):
    '''
    Return the Unicode block name for ch, or None if ch has no block.

    >>> block(u'a')
    'Basic Latin'
    >>> block(unichr(0x0b80))
    'Tamil'
    >>> block(unichr(0xe0080))

    '''

    assert isinstance(ch, unicode) and len(ch) == 1, repr(ch)
    cp = ord(ch)
    for start, end, name in _blocks:
        if start <= cp <= end:
            return name

categoryCodeDescriptions = {'Cc': "Other, Control",
                            'Cf': "Other, Format",
                            # 'Cn': "Other, Not Assigned (no characters in the file have this property)",
                            'Cn': "Other, Not Assigned",
                            'Co': "Other, Private Use",
                            'Cs': "Other, Surrogate",
                            'LC': "Letter, Cased",
                            'Ll': "Letter, Lowercase",
                            'Lm': "Letter, Modifier",
                            'Lo': "Letter, Other",
                            'Lt': "Letter, Titlecase",
                            'Lu': "Letter, Uppercase",
                            'Mc': "Mark, Spacing Combining",
                            'Me': "Mark, Enclosing",
                            'Mn': "Mark, Nonspacing",
                            'Nd': "Number, Decimal Digit",
                            'Nl': "Number, Letter",
                            'No': "Number, Other",
                            'Pc': "Punctuation, Connector",
                            'Pd': "Punctuation, Dash",
                            'Pe': "Punctuation, Close",
                            # 'Pf': "Punctuation, Final quote (may behave like Ps or Pe depending on usage)",
                            # 'Pi': "Punctuation, Initial quote (may behave like Ps or Pe depending on usage)",
                            'Pf': "Punctuation, Final quote",
                            'Pi': "Punctuation, Initial quote",
                            'Po': "Punctuation, Other",
                            'Ps': "Punctuation, Open",
                            'Sc': "Symbol, Currency",
                            'Sk': "Symbol, Modifier",
                            'Sm': "Symbol, Math",
                            'So': "Symbol, Other",
                            'Zl': "Separator, Line",
                            'Zp': "Separator, Paragraph",
                            'Zs': "Separator, Space"}

def categoryCodeDescription(category):
    return categoryCodeDescriptions.get(category, "Not Available")

def analyze(part):
    content = part["text"]
    codepointSeq = []
    categorySeq = []
    blockSeq = []
    codepointHisto = Counter()
    categoryHisto = Counter()
    blockHisto = Counter()
    for c in content:
        if not isAscii(c):
            codepointHisto[c] += 1
            codepointSeq.append(c)
            cat = fmtMetadatum(c, 'category')
            blk = fmtMetadatum(c, 'block')
            if cat:
                categoryHisto[cat] += 1
                categorySeq.append(cat)
            if blk:
                blockHisto[blk] += 1
                blockSeq.append(blk)
            # Normal form KD
            # presumed of minor importance: omitted for now
            # categoryHisto["normalized:" + unicodedata.normalize('NFKD', c)] += 1
    contentElements = codepointSeq
    # Histogram: JSON-encoded string repn of the dict
    part["unicodeHistogram"] = json.dumps(codepointHisto)
    # Signature: sequence of codepoints
    part["unicodeSignature"] = " ".join(codepointSeq)
    # Catalog: bag of codepoints
    codepointCatalogElements = []
    for k in sorted(codepointHisto.keys()):
        v = codepointHisto[k]
        # v copies of this key
        codepointCatalogElements.append(" ".join([k for _ in xrange(v)]))
    part["unicodeCatalog"] = ", ".join(codepointCatalogElements)

    # Histogram: JSON-encoded string repn of the dict
    part["unicodeCategoryHistogram"] = json.dumps(categoryHisto)
    # Signature: sequence of category descriptors
    part["unicodeCategorySignature"] = " ".join(categorySeq)
    # Catalog: bag of categories
    categoryCatalogElements = []
    for k in sorted(categoryHisto.keys()):
        v = categoryHisto[k]
        # v copies of this key
        categoryCatalogElements.append(" ".join([k for _ in xrange(v)]))
    part["unicodeCategoryCatalog"] = ", ".join(categoryCatalogElements)

    # Histogram: JSON-encoded string repn of the dict
    part["unicodeBlockHistogram"] = json.dumps(blockHisto)
    # Signature: sequence of block descriptors
    part["unicodeBlockSignature"] = " ".join(blockSeq)
    # Catalog: bag of blocks
    blockCatalogElements = []
    for k in sorted(blockHisto.keys()):
        v = blockHisto[k]
        # v copies of this key
        blockCatalogElements.append(" ".join([k for _ in xrange(v)]))
    part["unicodeBlockCatalog"] = ", ".join(blockCatalogElements)

    return part

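# Illustrative sketch (hypothetical input, editor's example): analyze({"text": u"h\u00e9llo \u2665"})
# would set, among other fields,
#   unicodeSignature         -> u"\u00e9 \u2665"
#   unicodeCategorySignature -> "Letter_Lowercase Symbol_Other"
#   unicodeBlockSignature    -> "Latin_1_Supplement Miscellaneous_Symbols"
# plus the corresponding *Catalog and JSON-encoded *Histogram entries.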
# Test data
HEART = u'\u2665'
SMILY = u'\u263a'
TSU = u'\u30C4'
LEFT = u'\u27E8'
RIGHT = u'\u27E9'
EURO = u'\u20AC'

if True:
    # Debugging path: exercise analyze() on the test document and stop;
    # the stdin loop below only runs if this guard is turned off.

    TESTUNICODE = LEFT + "h" + EURO + "llo " + HEART + HEART + SMILY + TSU + " goodby" + EURO + " " + SMILY + TSU + HEART + HEART + HEART + HEART + RIGHT

    print len(TESTUNICODE)
    print json.dumps(TESTUNICODE)

    TESTDOC = {"@context": "http://localhost:8080/publish/JSON/WSP1WS6-select unix_timestamp(a_importtime)*1000 as timestamp, a_* from ads a join sample s on a_id=s_id limit 50-context.json","schema:provider": {"a": "Organization", "uri": "http://memex.zapto.org/data/organization/1"}, "snapshotUri": "http://memex.zapto.org/data/page/850753E7323B188B93E6E28F730F2BFBFB1CE00B/1396493689000/raw","a": "WebPage","dateCreated": "2013-09-24T18:28:00","hasBodyPart": {"text": TESTUNICODE, "a": "WebPageElement"}, "hasTitlePart": {"text": "\u270b\u270b\u270bOnly Best \u270c\u270c\u270c Forget The Rest \u270b\u270b\u270b Outcall Specials TONIGHT \u270c\ud83d\udc8b\ud83d\udc45 Sexy Blonde is UP LATE \ud83d\udc9c\ud83d\udc9b\u270b\u270c - 25", "a": "WebPageElement"}, "uri": "http://memex.zapto.org/data/page/850753E7323B188B93E6E28F730F2BFBFB1CE00B/1396493689000/processed"}

    analyze(TESTDOC["hasBodyPart"])
    json.dump(TESTDOC, sys.stdout, indent=4)
    exit(0)

for line in sys.stdin:
    try:
        (url, jrep) = line.split('\t')
        d = json.loads(jrep)

        analyze(d["hasBodyPart"])
        analyze(d["hasTitlePart"])
        # insert gmtime
        # ensure it doesn't collide with any other gentime
        d["unicodeGentime"] = gentime()

        print url + "\t",
        json.dump(d, sys.stdout, sort_keys=True)
        print
    except ValueError as e:
        print >> sys.stderr, e
        pass
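# Usage sketch (assumed invocation; file names are hypothetical): with the
# `if True:` test guard above disabled, the script reads tab-separated
# "url<TAB>json" lines on stdin and writes annotated lines to stdout, e.g.
#   python unicode-histogram.py < pages.tsv > pages-annotated.tsv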