Datasets:

Modalities:
Text
Formats:
json
Languages:
Czech
Size:
< 1K
Libraries:
Datasets
pandas
License:
CNC_Capek / convert_capek.py
mfajcik's picture
Upload 2 files
1d2217c verified
import os
import re
from typing import Dict
import jsonlines
from tqdm import tqdm
# Path to the input corpus dump in the CNC "vertical" (.vert) format.
TARGET=".data/capek.vert"
def process_vert_format(vert_content: str) -> Dict[str, str]:
    """Parse a CNC ``.vert`` (vertical) corpus dump into plain-text documents.

    Each ``<doc>`` element becomes one header line built from its
    ``autor``/``nazev``/``genre`` attributes, followed by one detokenized
    line per non-empty ``<s>`` sentence.

    Args:
        vert_content: Full text of the ``.vert`` file.

    Returns:
        Mapping from document id to the reconstructed plain text.

    Raises:
        ValueError: If a ``<doc>`` opening tag lacks the expected
            ``id``/``autor``/``nazev``/``genre`` attributes (in that order).
    """
    # tqdm is only a progress bar; fall back to a no-op iterator so the
    # function also works where tqdm is not installed.
    try:
        from tqdm import tqdm
    except ImportError:
        def tqdm(iterable, **kwargs):
            return iterable

    # One match per <doc>...</doc> element (opening tag + body).
    doc_pattern = re.compile(r'<doc[^>]*>.*?</doc>', re.DOTALL)
    # Metadata attributes are expected on the opening tag, in this order.
    metadata_pattern = re.compile(
        r'<doc id="([^"]*)".+?autor="([^"]*)".+?nazev="([^"]*)".+?genre="([^"]*)"')
    # Group 1: sentence id (unused), group 2: the sentence's token lines.
    sent_pattern = re.compile(r'<s[^>]*id="([^"]*)".*?>(.*?)</s>', re.DOTALL)
    # Detokenization helpers, compiled once and reused for every sentence.
    ws_before_punct = re.compile(r'\s+([.,!?:;])')
    multi_ws = re.compile(r'\s+')

    processed_documents: Dict[str, str] = {}
    for doc in tqdm(doc_pattern.findall(vert_content)):
        metadata_match = metadata_pattern.search(doc)
        if not metadata_match:
            # Include a snippet of the offending tag so the bad document
            # can be located in the source file.
            raise ValueError(f"Metadata not found in document: {doc[:120]!r}")
        doc_id, author, title, genre = metadata_match.groups()
        metadata_str = (f"Autor: {author}, "
                        f"Název: {title}, "
                        f"Žánr: {genre}")

        # Header first, then one line per non-empty sentence.
        processed_document = [metadata_str]
        for _sent_id, sp_content in sent_pattern.findall(doc):
            # Each line is "token<TAB>annotations..."; keep only the token.
            tokens = [line.split("\t")[0].strip()
                      for line in sp_content.split("\n") if line.strip() != ""]
            speaker_text = " ".join(tokens)
            # Collapse whitespace runs, then drop spaces before punctuation.
            speaker_text = multi_ws.sub(' ', speaker_text).strip()
            speaker_text = ws_before_punct.sub(r'\1', speaker_text)
            # Some sentences are empty (e.g. 08A009N) -- skip them.
            if speaker_text == "":
                continue
            processed_document.append(speaker_text)
        processed_documents[doc_id] = '\n'.join(processed_document)
    return processed_documents
# Read the raw vertical file; the corpus is Czech, so decode as UTF-8
# explicitly instead of relying on the platform default encoding.
with open(TARGET, "r", encoding="utf-8") as f:
    vert_content = f.read()

# Convert the vertical dump into {doc_id: plain_text}.
processed_documents = process_vert_format(vert_content)

# Write the single split to .data/hf_dataset/cnc_capek/test.jsonl.
OF = ".data/hf_dataset/cnc_capek/test.jsonl"
os.makedirs(os.path.dirname(OF), exist_ok=True)
with jsonlines.open(OF, "w") as writer:
    for doc_id, doc in processed_documents.items():
        writer.write({"text": doc, "id": doc_id})