File size: 13,066 Bytes
40b0307
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
#!/usr/bin/env python3
#
# This file is part of SumeCzech corpus <http://hdl.handle.net/11234/1-2615>.
#
# Copyright 2018 Institute of Formal and Applied Linguistics, Faculty of
# Mathematics and Physics, Charles University in Prague, Czech Republic.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from collections import namedtuple
from datetime import datetime
import json
import re
from urllib.parse import urlparse

from bs4 import BeautifulSoup
import dateutil.parser
import dateutil.tz
import regex

# Per-domain scraping configuration: encoding of the pages, an optional
# tag/class pair used for link listings, and the four extractor callables.
DomainSettings = namedtuple("DomainSettings", [
    "encoding",             # character set of the downloaded pages
    "l_tag", "l_class",     # tag/class locating article links (may be None)
    "document_extractor",   # (soup, domain) -> article text
    "abstract_extractor",   # soup -> abstract text
    "headline_extractor",   # soup -> headline text
    "date_extractor",       # soup -> ISO-formatted publication date or None
])

# Czech month names (genitive case, as they appear in dates) -> month number.
# Used by parse_czech_month_date.
month_mapping = {
        "ledna": 1,
        # NOTE(review): "ůnora" looks like a deliberate misspelling of
        # "února", presumably to also match typos in scraped pages — confirm.
        "ůnora": 2,
        "února": 2,
        "března": 3,
        "dubna": 4,
        "května": 5,
        "června": 6,
        "července": 7,
        "srpna": 8,
        "září": 9,
        "října": 10,
        "listopadu": 11,
        "prosince": 12,
}

# 'day. monthname year, hour:minute' as used by lidovky.cz / novinky.cz.
date_re = re.compile(r"(\d{1,2})\.\s([^\W\d_]+)\s(\d{4}),?\s(\d{1,2}):(\d{2})")
cet_tz = dateutil.tz.gettz("CET")

# Patterns shared by several extractors.  (They used to be defined twice,
# identically; the duplicates were removed.)
author_re = re.compile("<p[^<]*(Autor:|FOTO:).*?</p>",
                       flags=re.DOTALL | re.MULTILINE | re.IGNORECASE)
multiple_linebreaks_re = re.compile(r"\n\s*\n", flags=re.MULTILINE)
pp_re = re.compile(r"</(h4|p)>\s*<(h4|p)>", flags=re.MULTILINE)

# regexes for heuristics to remove extra links
ctete_take_re = re.compile("Čtěte také.*",
                           flags=re.DOTALL | re.MULTILINE | re.IGNORECASE)

# subcaption heuristics: text ending in a word character
# (raw string: "\w" in a plain literal is an invalid string escape)
caption_re = re.compile(r"\w\s*$", flags=re.DOTALL | re.MULTILINE)

### NOVINKY
# Section prefixes whose headlines carry a 'prefix - headline' shape;
# clean_novinky_headline strips the prefix.
novinky_starts_of_interest_re = re.compile("Nové knihy|Nová DVD|Nová CD|Premiéry|Vánoční knihy|Vánoční DVD|Vánoční CD")

### LIDOVKY
# Special-cased lidovky.cz headline prefixes.
lidovky_starts_of_interest_re = re.compile(r"Mistrovství Evropy ve fotbale 2016|MS v ledním hokeji|Kryje inspekce policejní zátaras z lidí\? O šetření nehody na D1 rozhodne soud")

### DENIK
# Special-cased denik.cz headline prefixes.
denik_starts_of_interest_re = re.compile("Právě jsme se narodila. Vaše miminka|Právě jsme se narodili|Pozvánk")
# Leading dateline in denik.cz abstracts, e.g. '/PRAHA/ -'; needs the
# third-party 'regex' module for the \p{Lu} (uppercase letter) class.
denik_abstract_fix_re = regex.compile(r"(.*?(/[\p{Lu} ,-]*?/)? ?[-–])|(/[\p{Lu} ,-]*?/)")

def parse_czech_month_date(datetime_str):
    '''Parse a date written with a Czech month name.

    Used for:
        - lidovky.cz (dates like '2. ledna 2012 19:47')
        - novinky.cz (dates like 'pondělí 4. srpna 2003, 1:57')

    Returns the date as an ISO 8601 string with a CET offset, or None when
    the string cannot be parsed.
    '''
    match = date_re.search(datetime_str.lower())
    if match is None:
        return None
    # date_re accepts any word as the month, so guard against names missing
    # from month_mapping instead of raising KeyError on malformed input.
    month = month_mapping.get(match.group(2))
    if month is None:
        return None
    dt = datetime(int(match.group(3)), month, int(match.group(1)),         # Y M D
                  int(match.group(4)), int(match.group(5)), 0, 0, cet_tz)  # H M S us tz
    return dt.strftime("%Y-%m-%dT%H:%M:%S%z")

def parse_iso_date(datetime_str):
    '''Parse a date in ISO format and normalize it to CET.

    Used for:
        - denik.cz (dates like '2009-08-20T21:00:00+02:00')
        - idnes.cz (dates like '2015-12-17T06:57CET')
    '''
    parsed = dateutil.parser.parse(datetime_str)
    # naive timestamps are assumed to already be CET; aware ones are converted
    if parsed.tzinfo is None:
        parsed = parsed.replace(tzinfo=cet_tz)
    return parsed.astimezone(cet_tz).strftime("%Y-%m-%dT%H:%M:%S%z")

# generic
def dont_clean(raw_text):
    '''Identity cleaner for domains whose content needs no extra processing.'''
    return raw_text

def abstract_to_text(raw_abstract):
    '''Default abstract cleaner: plain text with surrounding whitespace removed.'''
    text = raw_abstract.text
    return text.strip()

def process_text(raw_text, domain, clean_domain_specific_text):
    '''Turn an article-body soup into cleaned plain text.

    Applies domain-independent tag removal, the supplied domain-specific
    cleaner, paragraph linebreak insertion, and whitespace normalization.
    '''
    # domain-independent: drop tags that never belong to the running text
    for unwanted in raw_text(['script', 'h1', 'h2', 'h3', 'img']):
        unwanted.extract()

    # domain-specific
    raw_text = clean_domain_specific_text(raw_text)

    # add linebreak between paragraphs (via the HTML string), then drop
    # author/photo-credit paragraphs; ugly but it works
    html = pp_re.sub('</p>\n<p>', str(raw_text))
    html = author_re.sub('', html)
    cleaned_text = BeautifulSoup(html, 'html.parser').text

    if domain == 'denik.cz':
        cleaned_text = ctete_take_re.sub('', cleaned_text)

    # collapse double spaces into linebreaks, then squeeze repeated linebreaks
    cleaned_text = cleaned_text.replace('  ', '\n').strip()
    return multiple_linebreaks_re.sub('\n', cleaned_text)

# Non-informative headline prefixes ('VIDEO: ...', 'RECENZE: ...', round
# markers, etc.).  Compiled once at import time; raw-string fragments avoid
# the invalid-escape DeprecationWarning the old non-raw '\d'/'\s' caused.
_headline_intro_re = re.compile(
    r'^(VIDEO|OBRAZEM|Autofotka týdne|Finanční poradna|Tipy na víkend'
    r'|RECENZE|Český poutník|Kam o víkendu s dětmi|TEST|Tip na výlet'
    r'|KOMENTÁŘ|Průzkum|S kamerou na cestách|Video týdne|Rady do zahrady'
    r'|POHNUTÉ OSUDY|ANALÝZA|Test|BAZAR|Putování s BBC|Co vám uniklo|ON-LINE'
    r'|Potvrzeno|ANKETA|Otázky čtenářů|Poslední slovo|Je to oficiální'
    r'|GLOSA|PŘEHLEDNĚ|ROZHOVOR|Výzkum|NÁZOR|ON-LINE CHAT|Na poslední chvíli'
    r'|TOP \d+ TÝDNE|Dlouhodobý test|FOTO|FOTO, VIDEO|DOKUMENT|EXKLU[SZ]IVNĚ'
    r'|CO VÁS ZAUJME|ANIMACE|ON-LINE REPORTÁŽ|BYDLENÍ|SOUTĚŽ|RETRO|AUDIO'
    r'|KRÁTCE|AUTOVIDEA ROKU|REPORTÁŽ|PODÍVEJTE SE|VIDEOTEST|Ukázka|TÉMA'
    r'|\d+\.\s*kolo(\s+ELH)?)\s*:\s*', flags=re.I)

def remove_headline_intro(headline):
    '''Remove some non-informative headline intros (e.g. 'VIDEO: ...').'''
    return _headline_intro_re.sub('', headline)

def process_headline(raw_headline, clean_headline):
    '''Extract headline text and apply generic plus domain-specific cleaning.'''
    text = raw_headline.text
    return clean_headline(remove_headline_intro(text))


def generic_headline_extractor(headline_cleaner):
    '''Build an extractor that treats the page's first <h1> as the headline.'''
    def extract(soup):
        # assumes at least one <h1> exists; raises IndexError otherwise
        first_h1 = soup.find_all('h1')[0]
        return process_headline(first_h1, headline_cleaner)

    return extract


def ceskenoviny_headline_extractor(soup):
    '''Headline for ceskenoviny.cz: the <h1 itemprop="name"> element.'''
    return soup.find('h1', itemprop='name').text.strip()


def generic_abstract_extractor(s_tag, s_class, abstract_cleaner):
    '''Build an extractor locating the abstract by tag name and CSS class.'''
    def extract(soup):
        return abstract_cleaner(soup.find(s_tag, class_=s_class))

    return extract


def ceskenoviny_abstract_extractor(soup):
    '''Abstract for ceskenoviny.cz: <p itemprop="description"> in the body.

    Drops the leading dateline before the first dash (e.g. 'Praha - ...')
    and removes the abstract element from the soup so the text extractor
    does not repeat it.
    '''
    body = soup.find('div', itemprop='articleBody')
    abstract_soup = body.find('p', itemprop='description')
    text = abstract_soup.text
    if '-' in text:
        text = text.split('-', maxsplit=1)[1]
    abstract_text = text.strip()
    abstract_soup.extract()
    return abstract_text


def generic_text_extractor(t_tag, t_class, text_cleaner):
    '''Build an extractor locating the article body by tag name and class.'''
    def extract(soup, domain):
        body = soup.find(t_tag, class_=t_class)
        return process_text(body, domain, text_cleaner)

    return extract


def ceskenoviny_text_extractor(soup, _):
    '''Article text for ceskenoviny.cz (<div itemprop="articleBody">).'''
    body = soup.find('div', itemprop='articleBody')
    # sidebars, lists, scripts and images never belong to the running text
    for unwanted in body(['div', 'ul', 'script', 'img']):
        unwanted.extract()
    tag_paragraph = body.find('p', class_='tags')
    if tag_paragraph is not None:
        tag_paragraph.extract()

    # add linebreak between paragraphs; ugly but it works
    html = pp_re.sub('</p>\n<p>', str(body))
    clean_text = BeautifulSoup(author_re.sub('', html), 'html.parser').text

    clean_text = clean_text.replace('  ', '\n').strip()
    return multiple_linebreaks_re.sub('\n', clean_text)


def ceskenoviny_date_extractor(soup):
    '''Publication date for ceskenoviny.cz, e.g. '24.01.2011, 20:00' (CET).'''
    raw = soup.find('span', itemprop='datePublished').text
    parsed = datetime.strptime(raw, "%d.%m.%Y, %H:%M").replace(tzinfo=cet_tz)
    return parsed.strftime("%Y-%m-%dT%H:%M:%S%z")

# domain-specific cleaner functions
### NOVINKY
def clean_novinky_headline(headline):
    '''Strip the section prefix from listing headlines ('Nové knihy - ...').'''
    is_listing = novinky_starts_of_interest_re.match(headline) is not None
    if is_listing and '-' in headline:
        return headline.split('-', maxsplit=1)[1].strip()
    return headline

def clean_novinky_text(raw_text):
    '''Clean text from novinky.cz: drop media boxes, captions and galleries.'''
    # photos/videos, then picture and video descriptions
    class_removals = [
        ('div', ['articlePhotos', 'articleVideo']),
        ('p', ['acmDescription', 'acmAuthor']),
    ]
    for tag_name, classes in class_removals:
        for tag in raw_text(tag_name, classes):
            tag.extract()
    # picture and video descriptions: authors
    for tag in raw_text('p', string=author_re):
        tag.extract()
    # subcaptions
    for tag in raw_text('h4', string=caption_re):
        tag.extract()
    # see also, galleries, etc.
    for tag in raw_text('table', 'table_1'):
        tag.extract()

    return raw_text

def novinky_date(soup):
    '''Publication date for novinky.cz, or None when absent.

    Example:
    <p id="articleDate" class="publicDate">  čtvrtek 31. července 2003, 13:22  </p>
    '''
    date_soup = soup.find('p', id='articleDate')
    if date_soup is None:
        return None
    # keep only the part before any dash before parsing
    datetime_str = date_soup.text.split('-')[0].strip()
    return parse_czech_month_date(datetime_str)

### LIDOVKY
def clean_lidovky_text(raw_text):
    '''Clean text from lidovky.cz: drop link boxes, tweets and the abstract.'''
    # see also, galleries, etc.
    for table in raw_text('table', ['not4bbtext']):
        table.extract()
    # embedded tweets
    for tweet in raw_text('blockquote', class_='twitter-tweet'):
        tweet.extract()
    # the abstract may be repeated inside the body; drop it
    for opener in raw_text('p', ['opener']):
        opener.extract()
    return raw_text

def clean_lidovky_abstract(abstract):
    '''Strip the dateline ('domicil') spans and surrounding whitespace.'''
    for domicil in abstract('span', 'domicil'):
        domicil.extract()
    return abstract.text.strip()

def lidovky_date(soup):
    '''Publication date for lidovky.cz, or None when absent.

    Example:
    <span class="time">
        2. ledna 2012&nbsp;19:47
    </span>
    '''
    time_soup = soup.find('span', class_='time')
    if time_soup is None:
        return None
    # keep only the first comma-separated part (the publication date)
    # NOTE(review): BeautifulSoup .text usually yields '\xa0' rather than a
    # literal '&nbsp;'; the regex's \s also matches '\xa0', so parsing works.
    raw = time_soup.text.split(',')[0]
    return parse_czech_month_date(raw.strip().replace('&nbsp;', ' '))

### DENIK
def clean_denik_abstract(abstract):
    '''Drop the leading dateline (e.g. '/PRAHA/ -') from a denik.cz abstract.'''
    text = abstract.text.strip()
    text = text.rstrip('-–')
    return denik_abstract_fix_re.sub('', text).strip()

def denik_date(soup):
    '''Publication date for denik.cz, or None when absent.

    Example:
    <meta property="article:published_time" content="2011-01-24T20:00:00+01:00">
    '''
    meta = soup.find('meta', property="article:published_time")
    return None if meta is None else parse_iso_date(meta['content'])

### IDNES
def idnes_date(soup):
    '''Publication date for idnes.cz, or None when absent.

    Example:
    <span class="time-date" itemprop="datePublished" content="2012-07-02T15:18CET">
    '''
    span = soup.find('span', itemprop="datePublished")
    return None if span is None else parse_iso_date(span['content'].strip())

### CESKENOVINY

### Final config dictionary: one DomainSettings per supported news site,
### wiring together page encoding, link-listing locator, and the text /
### abstract / headline / date extractor callables defined above.
domain_settings_dict = {
    'novinky.cz': DomainSettings('utf-8',
                                 None, None,
                                 generic_text_extractor('div','articleBody', clean_novinky_text),
                                 generic_abstract_extractor('p', 'perex', abstract_to_text),
                                 generic_headline_extractor(clean_novinky_headline),
                                 novinky_date),
    'lidovky.cz': DomainSettings('windows-1250',
                                 'div', 'list-art',
                                 generic_text_extractor('div', 'text', clean_lidovky_text),
                                 generic_abstract_extractor(['div', 'p'], 'opener', clean_lidovky_abstract),
                                 generic_headline_extractor(dont_clean),
                                 lidovky_date),
    'idnes.cz': DomainSettings('windows-1250',
                               None, None,
                               generic_text_extractor('div', 'text', dont_clean),
                               generic_abstract_extractor('div', 'opener', abstract_to_text),
                               generic_headline_extractor(dont_clean),
                               idnes_date),
    'denik.cz': DomainSettings('utf-8',
                               None, None,
                               generic_text_extractor('div', 'bbtext', dont_clean),
                               generic_abstract_extractor('p', 'perex', clean_denik_abstract),
                               generic_headline_extractor(dont_clean),
                               denik_date),
    # ceskenoviny.cz uses fully custom extractors instead of the generic ones
    'ceskenoviny.cz': DomainSettings('utf-8',
                                     None, None,
                                     ceskenoviny_text_extractor,
                                     ceskenoviny_abstract_extractor,
                                     ceskenoviny_headline_extractor,
                                     ceskenoviny_date_extractor)
}