|
|
""" |
|
|
Fetch data from HuggingFace dataset undertheseanlp/UTS_VLC |
|
|
- Get documents from law dataset |
|
|
- Segment sentences using underthesea |
|
|
- Get first 3000 sentences |
|
|
""" |
|
|
|
|
|
import re |
|
|
from os.path import dirname, join |
|
|
|
|
|
from datasets import load_dataset |
|
|
|
|
|
from underthesea import sent_tokenize, text_normalize |
|
|
|
|
|
|
|
|
def clean_text(text):
    """Normalize text and strip markdown formatting artifacts.

    Applies underthesea's ``text_normalize``, removes markdown headings,
    emphasis markers, horizontal rules and link URLs (keeping the link
    label), collapses blank lines, and strips each remaining line.
    """
    text = text_normalize(text)

    # Markdown cleanup rules, applied in order: (pattern, replacement, flags).
    cleanup_rules = (
        (r'^#+\s+', '', re.MULTILINE),          # headings
        (r'\*+', '', 0),                        # bold/italic markers
        (r'^-+$', '', re.MULTILINE),            # horizontal rules
        (r'\[([^\]]+)\]\([^)]+\)', r'\1', 0),   # links -> label only
        (r'\n{2,}', '\n', 0),                   # collapse blank lines
    )
    for pattern, replacement, flags in cleanup_rules:
        text = re.sub(pattern, replacement, text, flags=flags)

    # Trim leading/trailing whitespace on every line.
    return '\n'.join(line.strip() for line in text.split('\n'))
|
|
|
|
|
|
|
|
def is_valid_sentence(sent):
    """Check if sentence is valid for UD annotation.

    Strips the sentence and removes trailing numbered/lettered list
    markers, then applies a series of rejection filters. Returns a
    ``(is_valid, cleaned_sentence)`` tuple either way.
    """
    # Normalize: strip, drop trailing "\n1." / "\na)" markers, strip again.
    cleaned = sent.strip()
    cleaned = re.sub(r'\n\d+\.$', '', cleaned)
    cleaned = re.sub(r'\n[a-z]\)$', '', cleaned)
    cleaned = cleaned.strip()

    if not cleaned:
        return False, cleaned

    # Too short or too long for a useful treebank sentence.
    if not (20 <= len(cleaned) <= 300):
        return False, cleaned

    # Legal-document boilerplate headers (national motto, part/chapter labels).
    if re.match(r'^(QUỐC HỘI|CỘNG HÒA|Độc lập|Phần thứ|Chương [IVX]+|MỤC \d+)', cleaned):
        return False, cleaned

    # Article/clause/section headings ("Điều 1", "Khoản 2", ...).
    if re.match(r'^(Điều \d+|Khoản \d+|Mục \d+)', cleaned):
        return False, cleaned

    # Mostly-uppercase text is a heading, not prose.
    uppercase_count = sum(ch.isupper() for ch in cleaned)
    if uppercase_count > len(cleaned) * 0.5:
        return False, cleaned

    # Metadata lines and table fragments.
    if cleaned.startswith(('English:', 'Số hiệu:', 'Ngày hiệu lực:', '---', '|')):
        return False, cleaned

    # Must contain at least one Vietnamese diacritic character.
    vietnamese_chars = r'[àáảãạăắằẳẵặâấầẩẫậèéẻẽẹêếềểễệìíỉĩịòóỏõọôốồổỗộơớờởỡợùúủũụưứừửữựỳýỷỹỵđ]'
    if not re.search(vietnamese_chars, cleaned, re.IGNORECASE):
        return False, cleaned

    # A trailing bare number on its own line marks a truncated fragment.
    if re.search(r'\n\d+$', cleaned):
        return False, cleaned

    return True, cleaned
|
|
|
|
|
|
|
|
def fetch_and_process(max_sentences=3000):
    """Fetch law documents, segment them, and save valid sentences.

    Loads the ``undertheseanlp/UTS_VLC`` dataset from HuggingFace, cleans
    each document's markdown, segments it with underthesea, keeps only
    sentences accepted by ``is_valid_sentence``, and writes up to
    ``max_sentences`` of them (1-based index, tab, sentence per line) to
    ``sentences.txt`` in the parent directory of this script.

    Args:
        max_sentences: Maximum number of sentences to collect.
            Defaults to 3000, matching the original behavior.
    """
    print("Loading dataset from HuggingFace...")
    # NOTE(review): split name "2026" is dataset-specific — confirm it
    # still exists on the hub before relying on this script.
    ds = load_dataset("undertheseanlp/UTS_VLC", split="2026")

    print("Segmenting sentences...")
    all_sentences = []
    for idx, doc in enumerate(ds):
        content = clean_text(doc["content"])
        # is_valid_sentence strips and cleans each sentence itself,
        # so no pre-processing of the raw segment is needed here.
        for sent in sent_tokenize(content):
            is_valid, cleaned_sent = is_valid_sentence(sent)
            if is_valid:
                all_sentences.append(cleaned_sent)
        # Stop reading documents once enough sentences are collected;
        # the slice below caps the final count at exactly max_sentences.
        if len(all_sentences) >= max_sentences:
            print(f"Processed {idx + 1} documents")
            break

    selected = all_sentences[:max_sentences]
    print(f"Total sentences collected: {len(selected)}")

    # Write "<index>\t<sentence>" lines next to this script's parent dir.
    output_dir = dirname(dirname(__file__))
    output_file = join(output_dir, "sentences.txt")
    with open(output_file, "w", encoding="utf-8") as f:
        for i, sent in enumerate(selected, 1):
            f.write(f"{i}\t{sent}\n")
    print(f"Saved to: {output_file}")

    # Show a short preview so the run can be sanity-checked from the console.
    print("\nSample sentences:")
    for i, sent in enumerate(selected[:5], 1):
        print(f" {i}. {sent[:80]}...")
|
|
|
if __name__ == "__main__": |
|
|
fetch_and_process() |
|
|
|