| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| from collections import namedtuple |
| from datetime import datetime |
| import json |
| import re |
| from urllib.parse import urlparse |
|
|
| from bs4 import BeautifulSoup |
| import dateutil.parser |
| import dateutil.tz |
| import regex |
|
|
# Per-domain scraping configuration: page character encoding, the listing-page
# selector (l_tag/l_class, where used), and the four extractor callables that
# pull the body, abstract, headline and publication date out of article pages.
DomainSettings = namedtuple(
    "DomainSettings",
    [
        "encoding",
        "l_tag",
        "l_class",
        "document_extractor",
        "abstract_extractor",
        "headline_extractor",
        "date_extractor",
    ],
)
|
|
# Czech month names in the genitive case (as they appear in dates such as
# "2. ledna 2012") mapped to month numbers.  "ůnora" is kept alongside the
# correct "února" (February), presumably to tolerate a misspelling seen in
# the source pages — confirm against the scraped data.
month_mapping = {
    "ledna": 1,
    "ůnora": 2,
    "února": 2,
    "března": 3,
    "dubna": 4,
    "května": 5,
    "června": 6,
    "července": 7,
    "srpna": 8,
    "září": 9,
    "října": 10,
    "listopadu": 11,
    "prosince": 12,
}
|
|
# Timestamp like "2. ledna 2012 19:47": day, Czech month name (letters only),
# year, optional comma, hour, minute.
date_re = re.compile(r"(\d{1,2})\.\s([^\W\d_]+)\s(\d{4}),?\s(\d{1,2}):(\d{2})")
# All extracted dates are normalized to Central European Time.
cet_tz = dateutil.tz.gettz("CET")
# Paragraphs crediting an author or photo source — stripped from article HTML.
# (These three patterns were previously compiled twice with identical
# definitions; the duplicates are removed.)
author_re = re.compile(r"<p[^<]*(Autor:|FOTO:).*?</p>",
                       flags=re.DOTALL | re.MULTILINE | re.IGNORECASE)
# Collapse runs of blank lines into a single newline.
multiple_linebreaks_re = re.compile(r"\n\s*\n", flags=re.MULTILINE)
# Whitespace between consecutive closing/opening paragraph or h4 tags.
pp_re = re.compile(r"</(h4|p)>\s*<(h4|p)>", flags=re.MULTILINE)


# denik.cz "Čtěte také" ("Read also") trailer; it and everything after it
# is dropped from the cleaned text.
ctete_take_re = re.compile("Čtěte také.*",
                           flags=re.DOTALL | re.MULTILINE | re.IGNORECASE)


# Photo-caption headings end in a word character.  Raw string avoids the
# invalid "\w" escape warning the original non-raw literal produced.
caption_re = re.compile(r"\w\s*$", flags=re.DOTALL | re.MULTILINE)
|
|
| |
# novinky.cz listing headlines whose "<rubric> - " prefix should be stripped
# (see clean_novinky_headline below).
novinky_starts_of_interest_re = re.compile("Nové knihy|Nová DVD|Nová CD|Premiéry|Vánoční knihy|Vánoční DVD|Vánoční CD")


# lidovky.cz recurring-series headlines; presumably consumed by a caller
# outside this chunk — verify usage before changing.
lidovky_starts_of_interest_re = re.compile(r"Mistrovství Evropy ve fotbale 2016|MS v ledním hokeji|Kryje inspekce policejní zátaras z lidí\? O šetření nehody na D1 rozhodne soud")


# denik.cz recurring-feature headlines; presumably consumed by a caller
# outside this chunk — verify usage before changing.
denik_starts_of_interest_re = re.compile("Právě jsme se narodila. Vaše miminka|Právě jsme se narodili|Pozvánk")
# Leading dateline such as "/PRAHA/ -" at the start of a denik.cz abstract.
# Uses the third-party `regex` module for the \p{Lu} (uppercase letter) class,
# which the stdlib `re` module does not support.
denik_abstract_fix_re = regex.compile(r"(.*?(/[\p{Lu} ,-]*?/)? ?[-–])|(/[\p{Lu} ,-]*?/)")
|
|
def parse_czech_month_date(datetime_str):
    '''Parse a timestamp written with a Czech genitive month name.

    Used for:
      - lidovky.cz (dates like '2. ledna 2012 19:47')
      - novinky.cz (dates like 'pondělí 4. srpna 2003, 1:57')

    Returns an ISO-8601 string with a CET offset, or None when the input
    does not match the expected pattern.
    '''
    found = date_re.search(datetime_str.lower())
    if found is None:
        return None
    day, month_name, year, hour, minute = found.groups()
    parsed = datetime(int(year), month_mapping[month_name], int(day),
                      int(hour), int(minute), 0, 0, cet_tz)
    return parsed.strftime("%Y-%m-%dT%H:%M:%S%z")
|
|
def parse_iso_date(datetime_str):
    '''Parse an ISO-style timestamp and normalize it to CET.

    Used for:
      - denik.cz (dates like '2009-08-20T21:00:00+02:00')
      - idnes.cz (dates like '2015-12-17T06:57CET')
    '''
    parsed = dateutil.parser.parse(datetime_str)
    # Naive timestamps are assumed to already be in CET.
    if parsed.tzinfo is None:
        parsed = parsed.replace(tzinfo=cet_tz)
    return parsed.astimezone(cet_tz).strftime("%Y-%m-%dT%H:%M:%S%z")
|
|
| |
def dont_clean(raw_text):
    '''Identity cleaner for domains whose content needs no post-processing.'''
    return raw_text
|
|
def abstract_to_text(raw_abstract):
    '''Return the tag's visible text with surrounding whitespace removed.'''
    text = raw_abstract.text
    return text.strip()
|
|
def process_text(raw_text, domain, clean_domain_specific_text):
    '''Clean text.

    raw_text -- BeautifulSoup tag holding the article body
    domain -- source domain string, e.g. 'denik.cz'
    clean_domain_specific_text -- per-domain cleaner applied before the
        generic cleanup below
    Returns the plain-text article body.
    '''
    # Drop scripts, sub-headlines and images wholesale.
    for tag in raw_text(['script', 'h1', 'h2', 'h3', 'img']):
        tag.extract()

    raw_text = clean_domain_specific_text(raw_text)

    # Force a newline between paragraphs, then strip author/photo credits.
    raw_text = author_re.sub('', pp_re.sub('</p>\n<p>', str(raw_text)))
    cleaned_text = BeautifulSoup(raw_text, 'html.parser').text

    if domain == 'denik.cz':
        # Cut the "Čtěte také" ("read also") trailer and everything after it.
        cleaned_text = ctete_take_re.sub('', cleaned_text)

    # NOTE(review): the first replace() argument looks like it should be a
    # non-breaking space (U+00A0), not an ordinary space — replacing every
    # plain space with a newline would shred the text.  Confirm it was not
    # mangled by a past encoding change.
    cleaned_text = multiple_linebreaks_re.sub('\n', cleaned_text.replace(' ', '\n').strip())
    return cleaned_text
|
|
def remove_headline_intro(headline):
    '''Remove a non-informative rubric label (e.g. "VIDEO: ...") from the
    start of a headline.

    Matching is case-insensitive; the label and the colon that follows it
    are stripped.  Headlines without a known label are returned unchanged.
    '''
    # Raw strings keep the \d / \s escapes valid — the original non-raw
    # literals trigger invalid-escape SyntaxWarnings on modern Python.
    return re.sub(r'^(VIDEO|OBRAZEM|Autofotka týdne|Finanční poradna|Tipy na víkend'
                  r'|RECENZE|Český poutník|Kam o víkendu s dětmi|TEST|Tip na výlet'
                  r'|KOMENTÁŘ|Průzkum|S kamerou na cestách|Video týdne|Rady do zahrady'
                  r'|POHNUTÉ OSUDY|ANALÝZA|Test|BAZAR|Putování s BBC|Co vám uniklo|ON-LINE'
                  r'|Potvrzeno|ANKETA|Otázky čtenářů|Poslední slovo|Je to oficiální'
                  r'|GLOSA|PŘEHLEDNĚ|ROZHOVOR|Výzkum|NÁZOR|ON-LINE CHAT|Na poslední chvíli'
                  r'|TOP \d+ TÝDNE|Dlouhodobý test|FOTO|FOTO, VIDEO|DOKUMENT|EXKLU[SZ]IVNĚ'
                  r'|CO VÁS ZAUJME|ANIMACE|ON-LINE REPORTÁŽ|BYDLENÍ|SOUTĚŽ|RETRO|AUDIO'
                  r'|KRÁTCE|AUTOVIDEA ROKU|REPORTÁŽ|PODÍVEJTE SE|VIDEOTEST|Ukázka|TÉMA'
                  r'|\d+\.\s*kolo(\s+ELH)?)\s*:\s*', '', headline, flags=re.I)
|
|
def process_headline(raw_headline, clean_headline):
    '''Extract headline text, drop rubric intros, apply the domain cleaner.'''
    return clean_headline(remove_headline_intro(raw_headline.text))
|
|
|
|
def generic_headline_extractor(headline_cleaner):
    '''Build an extractor that treats the page's first <h1> as the headline.'''
    def extract(soup):
        first_h1 = soup.find_all('h1')[0]
        return process_headline(first_h1, headline_cleaner)
    return extract
|
|
|
|
def ceskenoviny_headline_extractor(soup):
    '''Headline for ceskenoviny.cz: text of the <h1 itemprop="name"> tag.'''
    return soup.find('h1', itemprop='name').text.strip()
|
|
|
|
def generic_abstract_extractor(s_tag, s_class, abstract_cleaner):
    '''Build an extractor that cleans the first (s_tag, s_class) node.'''
    def extract(soup):
        node = soup.find(s_tag, class_=s_class)
        return abstract_cleaner(node)
    return extract
|
|
|
|
def ceskenoviny_abstract_extractor(soup):
    '''Abstract for ceskenoviny.cz.

    The description paragraph sits inside the article body and usually opens
    with a dateline ("PRAHA - ..."); everything up to and including the first
    dash is dropped.  The paragraph is removed from the soup so the body
    extractor does not see it again.
    '''
    body = soup.find('div', itemprop='articleBody')
    paragraph = body.find('p', itemprop='description')
    text = paragraph.text
    if '-' in text:
        text = text.split('-', maxsplit=1)[1]
    abstract_text = text.strip()
    paragraph.extract()
    return abstract_text
|
|
|
|
def generic_text_extractor(t_tag, t_class, text_cleaner):
    '''Build an extractor for the article body found at (t_tag, t_class).'''
    def extract(soup, domain):
        body = soup.find(t_tag, class_=t_class)
        return process_text(body, domain, text_cleaner)
    return extract
|
|
|
|
def ceskenoviny_text_extractor(soup, _):
    '''Article body for ceskenoviny.cz as plain text.

    The second argument (the domain) is unused; it exists so all text
    extractors share the same call signature.
    '''
    article_soup = soup.find('div', itemprop='articleBody')
    # Drop embedded boxes, lists, scripts and images wholesale.
    for tag in article_soup(['div', 'ul', 'script', 'img']):
        tag.extract()
    # The tag-list paragraph is navigation, not article text.
    tags = article_soup.find('p', class_='tags')
    if tags is not None:
        tags.extract()

    # Force a newline between paragraphs, then strip author/photo credits.
    raw_text = author_re.sub('', pp_re.sub('</p>\n<p>', str(article_soup)))
    clean_text = BeautifulSoup(raw_text, 'html.parser').text

    # NOTE(review): the first replace() argument looks like it should be a
    # non-breaking space (U+00A0), not an ordinary space — confirm it was
    # not mangled by a past encoding change.
    clean_text = multiple_linebreaks_re.sub('\n', clean_text.replace(' ', '\n').strip())
    return clean_text
|
|
|
|
def ceskenoviny_date_extractor(soup):
    '''Publication date for ceskenoviny.cz, e.g. "24.01.2011, 20:00" (CET).'''
    raw = soup.find('span', itemprop='datePublished').text
    parsed = datetime.strptime(raw, "%d.%m.%Y, %H:%M").replace(tzinfo=cet_tz)
    return parsed.strftime("%Y-%m-%dT%H:%M:%S%z")
|
|
| |
| |
def clean_novinky_headline(headline):
    '''Strip the listing-series prefix ("Nové knihy - ...") from a
    novinky.cz headline; other headlines pass through unchanged.'''
    if '-' not in headline:
        return headline
    if novinky_starts_of_interest_re.match(headline) is None:
        return headline
    return headline.split('-', maxsplit=1)[1].strip()
|
|
def clean_novinky_text(raw_text):
    '''Clean the article body of novinky.cz in place and return it.

    Removes photo/video containers, caption and author paragraphs,
    author-credit paragraphs, caption headings and layout tables.
    '''
    # Tags selected by CSS class.
    for name, classes in (('div', ['articlePhotos', 'articleVideo']),
                          ('p', ['acmDescription', 'acmAuthor'])):
        for tag in raw_text(name, classes):
            tag.extract()
    # Tags selected by their string content.
    for name, pattern in (('p', author_re), ('h4', caption_re)):
        for tag in raw_text(name, string=pattern):
            tag.extract()
    for tag in raw_text('table', 'table_1'):
        tag.extract()
    return raw_text
|
|
def novinky_date(soup):
    '''Publication date for novinky.cz, or None if absent.

    Example:
    <p id="articleDate" class="publicDate"> čtvrtek 31. července 2003, 13:22 </p>
    '''
    date_tag = soup.find('p', id='articleDate')
    if date_tag is None:
        return None
    # Anything after a dash (e.g. an update note) is ignored.
    return parse_czech_month_date(date_tag.text.split('-')[0].strip())
|
|
| |
def clean_lidovky_text(raw_text):
    '''Clean the article body of lidovky.cz in place and return it.

    Drops non-article tables, embedded tweets and the opener paragraph
    (the opener is captured separately as the abstract).
    '''
    doomed = []
    doomed.extend(raw_text('table', ['not4bbtext']))
    doomed.extend(raw_text('blockquote', class_='twitter-tweet'))
    doomed.extend(raw_text('p', ['opener']))
    for tag in doomed:
        tag.extract()
    return raw_text
|
|
def clean_lidovky_abstract(abstract):
    '''Strip the dateline span from a lidovky.cz abstract, return its text.'''
    for domicil in abstract('span', 'domicil'):
        domicil.extract()
    return abstract.text.strip()
|
|
def lidovky_date(soup):
    '''Publication date for lidovky.cz, or None if absent.

    Example:
    <span class="time">
    2. ledna 2012 19:47
    </span>
    '''
    date_soup = soup.find('span', class_='time')
    if date_soup is None:
        return None
    # Keep only the part before a comma (e.g. an update note is dropped).
    # NOTE(review): the replace() below looks like it should map a
    # non-breaking space (U+00A0) to a plain space — confirm its first
    # argument was not mangled to an ordinary space, which would make the
    # call a no-op.
    datetime_str = date_soup.text.split(',')[0].strip().replace(' ', ' ')
    return parse_czech_month_date(datetime_str)
|
|
| |
def clean_denik_abstract(abstract):
    '''Strip the leading dateline (e.g. "/PRAHA/ -") and trailing dashes
    from a denik.cz abstract, returning the bare text.'''
    text = abstract.text.strip().rstrip('-–')
    return denik_abstract_fix_re.sub('', text).strip()
|
|
def denik_date(soup):
    '''Publication date for denik.cz, or None if absent.

    Example:
    <meta property="article:published_time" content="2011-01-24T20:00:00+01:00">
    '''
    meta = soup.find('meta', property="article:published_time")
    return None if meta is None else parse_iso_date(meta['content'])
|
|
| |
def idnes_date(soup):
    '''Publication date for idnes.cz, or None if absent.

    Example:
    <span class="time-date" itemprop="datePublished" content="2012-07-02T15:18CET">
    '''
    stamp = soup.find('span', itemprop="datePublished")
    if stamp is None:
        return None
    return parse_iso_date(stamp['content'].strip())
|
|
| |
|
|
| |
# Maps each supported news domain to its scraping configuration: page
# encoding, listing selector (l_tag/l_class, only set for lidovky.cz) and
# the document/abstract/headline/date extractor callables defined above.
domain_settings_dict = {
    'novinky.cz': DomainSettings('utf-8',
                                 None, None,
                                 generic_text_extractor('div','articleBody', clean_novinky_text),
                                 generic_abstract_extractor('p', 'perex', abstract_to_text),
                                 generic_headline_extractor(clean_novinky_headline),
                                 novinky_date),
    'lidovky.cz': DomainSettings('windows-1250',
                                 'div', 'list-art',
                                 generic_text_extractor('div', 'text', clean_lidovky_text),
                                 generic_abstract_extractor(['div', 'p'], 'opener', clean_lidovky_abstract),
                                 generic_headline_extractor(dont_clean),
                                 lidovky_date),
    'idnes.cz': DomainSettings('windows-1250',
                               None, None,
                               generic_text_extractor('div', 'text', dont_clean),
                               generic_abstract_extractor('div', 'opener', abstract_to_text),
                               generic_headline_extractor(dont_clean),
                               idnes_date),
    'denik.cz': DomainSettings('utf-8',
                               None, None,
                               generic_text_extractor('div', 'bbtext', dont_clean),
                               generic_abstract_extractor('p', 'perex', clean_denik_abstract),
                               generic_headline_extractor(dont_clean),
                               denik_date),
    'ceskenoviny.cz': DomainSettings('utf-8',
                                     None, None,
                                     ceskenoviny_text_extractor,
                                     ceskenoviny_abstract_extractor,
                                     ceskenoviny_headline_extractor,
                                     ceskenoviny_date_extractor)
}
|
|