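"""
Book-style document chunker: splits long documents (docx, pdf, txt, html,
doc) into retrieval chunks, merging sections along detected bullet/heading
hierarchies where possible.
"""
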
import logging
import re
from io import BytesIO

from tika import parser

from deepdoc.parser import PdfParser, DocxParser, PlainParser, HtmlParser
from deepdoc.parser.utils import get_text
from rag.nlp import bullets_category, is_english, remove_contents_table, \
    hierarchical_merge, make_colon_as_title, naive_merge, random_choices, tokenize_table, \
    tokenize_chunks
from rag.nlp import rag_tokenizer


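# PDF parser tuned for books: runs OCR, layout recognition and table
# detection, then merges text boxes (vertical merge, same-bullet merge)
# into section lines.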
class Pdf(PdfParser):
    def __call__(self, filename, binary=None, from_page=0,
                 to_page=100000, zoomin=3, callback=None):
        from timeit import default_timer as timer

        start = timer()
        callback(msg="OCR started")
        self.__images__(
            filename if not binary else binary,
            zoomin,
            from_page,
            to_page,
            callback)
        callback(msg="OCR finished ({:.2f}s)".format(timer() - start))

        start = timer()
        self._layouts_rec(zoomin)
        callback(0.67, "Layout analysis ({:.2f}s)".format(timer() - start))
        logging.debug("layouts: {}".format(timer() - start))

        start = timer()
        self._table_transformer_job(zoomin)
        callback(0.68, "Table analysis ({:.2f}s)".format(timer() - start))

        start = timer()
        self._text_merge()
        tbls = self._extract_table_figure(True, zoomin, True, True)
        self._naive_vertical_merge()
        self._filter_forpages()
        self._merge_with_same_bullet()
        callback(0.8, "Text extraction ({:.2f}s)".format(timer() - start))

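        # Each returned section is (text + position tag, layout label); the
        # position tag lets downstream tokenization locate the chunk in the PDF.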
        return [(b["text"] + self._line_tag(b, zoomin), b.get("layoutno", ""))
                for b in self.boxes], tbls


def chunk(filename, binary=None, from_page=0, to_page=100000,
          lang="Chinese", callback=None, **kwargs):
    """
    Supported file formats are docx, pdf, txt, html and doc.
    Since a book is long and not all of it is useful, if it's a PDF please
    set up the page ranges for every book, in order to eliminate negative
    effects and save computing time.
    """
    doc = {
        "docnm_kwd": filename,
        "title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", filename))
    }
    doc["title_sm_tks"] = rag_tokenizer.fine_grained_tokenize(doc["title_tks"])
    pdf_parser = None
    sections, tbls = [], []
    # Dispatch on file extension; every branch fills `sections` with
    # (text, layout-tag) pairs and strips any table-of-contents pages.
    if re.search(r"\.docx$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        doc_parser = DocxParser()
        sections, tbls = doc_parser(
            binary if binary else filename, from_page=from_page, to_page=to_page)
        remove_contents_table(sections, eng=is_english(
            random_choices([t for t, _ in sections], k=200)))
        tbls = [((None, lns), None) for lns in tbls]
        callback(0.8, "Finish parsing.")

    elif re.search(r"\.pdf$", filename, re.IGNORECASE):
        pdf_parser = Pdf()
        if kwargs.get("layout_recognize", "DeepDOC") == "Plain Text":
            # Skip OCR/layout analysis when plain-text extraction is requested.
            pdf_parser = PlainParser()
        sections, tbls = pdf_parser(filename if not binary else binary,
                                    from_page=from_page, to_page=to_page, callback=callback)

    elif re.search(r"\.txt$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        txt = get_text(filename, binary)
        sections = txt.split("\n")
        sections = [(line, "") for line in sections if line]
        remove_contents_table(sections, eng=is_english(
            random_choices([t for t, _ in sections], k=200)))
        callback(0.8, "Finish parsing.")

    elif re.search(r"\.(htm|html)$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        sections = HtmlParser()(filename, binary)
        sections = [(line, "") for line in sections if line]
        remove_contents_table(sections, eng=is_english(
            random_choices([t for t, _ in sections], k=200)))
        callback(0.8, "Finish parsing.")

    elif re.search(r"\.doc$", filename, re.IGNORECASE):
        callback(0.1, "Start to parse.")
        # Legacy .doc files go through Apache Tika.
        binary = BytesIO(binary)
        doc_parsed = parser.from_buffer(binary)
        sections = doc_parsed['content'].split('\n')
        sections = [(line, "") for line in sections if line]
        remove_contents_table(sections, eng=is_english(
            random_choices([t for t, _ in sections], k=200)))
        callback(0.8, "Finish parsing.")

    else:
        raise NotImplementedError(
            "file type not supported yet (docx, pdf, txt, html, doc supported)")

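    # Prefer merging sections along the detected bullet/heading hierarchy;
    # fall back to token-count based merging when no style is recognized.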
    make_colon_as_title(sections)
    bull = bullets_category(
        random_choices([t for t, _ in sections], k=100))
    if bull >= 0:
        chunks = ["\n".join(ck)
                  for ck in hierarchical_merge(bull, sections, 5)]
    else:
        sections = [s.split("@") for s, _ in sections]
        sections = [(pr[0], "@" + pr[1]) if len(pr) == 2 else (pr[0], '')
                    for pr in sections]
        chunks = naive_merge(
            sections, kwargs.get(
                "chunk_token_num", 256), kwargs.get(
                "delimer", "\n。；！？"))

    eng = lang.lower() == "english"

    res = tokenize_table(tbls, doc, eng)
    res.extend(tokenize_chunks(chunks, doc, eng, pdf_parser))

    return res


if __name__ == "__main__":
    import sys

    def dummy(prog=None, msg=""):
        pass

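    # Parse pages 1-10 of the file named on the command line, discarding
    # progress callbacks.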
    chunk(sys.argv[1], from_page=1, to_page=10, callback=dummy)