rain1024 commited on
Commit
3b0c416
·
verified ·
1 Parent(s): d1cffef

Add scripts/fetch_data.py

Browse files
Files changed (1) hide show
  1. scripts/fetch_data.py +115 -0
scripts/fetch_data.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Fetch data from HuggingFace dataset undertheseanlp/UTS_VLC
3
+ - Get documents from law dataset
4
+ - Segment sentences using underthesea
5
+ - Get first 3000 sentences
6
+ """
7
+
8
+ import re
9
+ from os.path import dirname, join
10
+
11
+ from datasets import load_dataset
12
+
13
+ from underthesea import sent_tokenize, text_normalize
14
+
15
+
16
def clean_text(text):
    """Normalize *text* and strip markdown formatting.

    Unicode normalization is delegated to underthesea's ``text_normalize``;
    afterwards markdown headers, emphasis markers, horizontal rules and
    links are removed, blank-line runs are collapsed to a single newline,
    and every line is trimmed of surrounding whitespace.
    """
    normalized = text_normalize(text)

    # (pattern, replacement, flags) triples applied in this exact order.
    substitutions = (
        (r'^#+\s+', '', re.MULTILINE),          # markdown headers
        (r'\*+', '', 0),                        # bold / italic markers
        (r'^-+$', '', re.MULTILINE),            # horizontal rules
        (r'\[([^\]]+)\]\([^)]+\)', r'\1', 0),   # [label](url) -> label
        (r'\n{2,}', '\n', 0),                   # collapse blank lines
    )
    for pattern, replacement, flags in substitutions:
        normalized = re.sub(pattern, replacement, normalized, flags=flags)

    # Trim each individual line before reassembling the document.
    return '\n'.join(line.strip() for line in normalized.split('\n'))
34
+
35
+
36
def is_valid_sentence(sent):
    """Decide whether a sentence is usable for UD annotation.

    Returns a ``(is_valid, cleaned_sentence)`` tuple, where the cleaned
    sentence has trailing list markers (e.g. "\\n1." or "\\na)") removed.
    A sentence is rejected when it is empty, outside the 20-300 character
    window, looks like a legal-document header or metadata line, is mostly
    uppercase, lacks Vietnamese diacritics, or appears truncated.
    """
    # Strip trailing enumeration markers left over from legal clauses.
    cleaned = sent.strip()
    cleaned = re.sub(r'\n\d+\.$', '', cleaned)
    cleaned = re.sub(r'\n[a-z]\)$', '', cleaned)
    cleaned = cleaned.strip()

    def _passes(s):
        # Empty after cleaning.
        if not s:
            return False
        # Keep only sentences in a length window suitable for annotation.
        if not (20 <= len(s) <= 300):
            return False
        # Document headers and section labels from Vietnamese legal texts.
        header_patterns = (
            r'^(QUỐC HỘI|CỘNG HÒA|Độc lập|Phần thứ|Chương [IVX]+|MỤC \d+)',
            r'^(Điều \d+|Khoản \d+|Mục \d+)',
        )
        if any(re.match(p, s) for p in header_patterns):
            return False
        # Mostly-uppercase lines are headings, not prose.
        if sum(1 for ch in s if ch.isupper()) > len(s) * 0.5:
            return False
        # Metadata fields and table/rule markers.
        if s.startswith(('English:', 'Số hiệu:', 'Ngày hiệu lực:', '---', '|')):
            return False
        # Require at least one Vietnamese diacritic character.
        if not re.search(r'[àáảãạăắằẳẵặâấầẩẫậèéẻẽẹêếềểễệìíỉĩịòóỏõọôốồổỗộơớờởỡợùúủũụưứừửữựỳýỷỹỵđ]', s, re.IGNORECASE):
            return False
        # A bare trailing number signals a truncated clause.
        if re.search(r'\n\d+$', s):
            return False
        return True

    return _passes(cleaned), cleaned
71
+
72
+
73
def fetch_and_process():
    """Fetch the UTS_VLC law corpus and export up to 3000 clean sentences.

    Loads the "2026" split from HuggingFace, cleans and sentence-segments
    each document, filters sentences via ``is_valid_sentence``, and writes
    the first 3000 survivors (tab-separated, 1-indexed) to
    ``sentences.txt`` in the repository root.
    """
    print("Loading dataset from HuggingFace...")
    dataset = load_dataset("undertheseanlp/UTS_VLC", split="2026")

    print("Segmenting sentences...")
    collected = []
    for doc_index, document in enumerate(dataset):
        # Clean one document, split it into sentences, keep the valid ones.
        cleaned_doc = clean_text(document["content"])
        for candidate in sent_tokenize(cleaned_doc):
            ok, sentence = is_valid_sentence(candidate.strip())
            if ok:
                collected.append(sentence)
        # Stop once the quota is reached; checked per document, so a few
        # extra sentences may be collected and trimmed below.
        if len(collected) >= 3000:
            print(f"Processed {doc_index + 1} documents")
            break

    selected = collected[:3000]
    print(f"Total sentences collected: {len(selected)}")

    # Write to <repo root>/sentences.txt (one level above scripts/).
    output_file = join(dirname(dirname(__file__)), "sentences.txt")
    with open(output_file, "w", encoding="utf-8") as f:
        for number, sentence in enumerate(selected, 1):
            f.write(f"{number}\t{sentence}\n")

    print(f"Saved to: {output_file}")

    # Show a short preview of the exported data.
    print("\nSample sentences:")
    for number, sentence in enumerate(selected[:5], 1):
        print(f" {number}. {sentence[:80]}...")
112
+
113
+
114
if __name__ == "__main__":
    # Run the full fetch -> segment -> filter -> save pipeline when
    # executed as a script (no effect when imported as a module).
    fetch_and_process()