ye-nlp commited on
Commit
daa3646
·
1 Parent(s): 1e5987b

added char_syl_ngram/

Browse files
char_syl_ngram/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
# __init__.py in char_syl_ngram directory
# Re-export the public API of the char_syl_ngram module so callers can do
# `from char_syl_ngram import detect_language_naive_bayes`, etc.
from .char_syl_ngram import sylbreak, generate_ngrams, generate_char_ngrams, train_naive_bayes, detect_language_naive_bayes
char_syl_ngram/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (318 Bytes). View file
 
char_syl_ngram/__pycache__/char_syl_ngram.cpython-38.pyc ADDED
Binary file (3.55 kB). View file
 
char_syl_ngram/char_syl_ngram.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Approach: A language detection algorithm using a Naive Bayes classifier with n-gram models
3
+
4
+ The n-gram model in this script is built by generating n-grams (either syllable or character-based) from a text corpus and computing their frequencies. Laplace smoothing is applied to these frequencies to account for n-grams not present in the training data, ensuring non-zero probabilities. For language detection, the script compares the n-grams of the input text against pre-computed language profiles, using a backoff strategy for missing n-grams, progressively reducing their length until a match is found. The detection calculates a score for each language by summing the logarithm of the probabilities of the text's n-grams according to the language profile. Finally, it normalizes these scores into a probability distribution, indicating the likelihood of the input text belonging to each language.
5
+
6
+ Language profile format: JSON
7
+
8
+ Written by Ye Kyaw Thu, LU Lab., Myanmar
9
+ Last updated: 31 Jan 2024
10
+ """
11
+
12
+ import os
13
+ import sys
14
+ import argparse
15
+ import re
16
+ from collections import defaultdict, Counter
17
+ from math import log
18
+
def sylbreak(text):
    """Split *text* (Myanmar script, with embedded Latin letters/digits)
    into a list of syllable tokens.

    A space is inserted in front of every position where a new syllable
    begins, then the marked string is split on whitespace.
    """
    consonants = r"က-အၵၶꧠၚၸꩡၡၛၺꧣၻၼၽၾၿၥဿႁၜၝၯၦဢဧဨဩ"
    latin_and_digits = r"a-zA-Z0-9"
    stacked_marker = r"္"
    asat = r"်"
    other_chars = r"ဣဤဥဦဧဩဪဿ၌၍၎၏၀-၉႐-႙၊။!-\/:-\@\[-`’“”{-~\s…"

    # A syllable starts at a consonant that is neither stacked nor killed
    # (lookbehind/lookahead on the stacking marker and asat), or at any
    # Latin/digit/other character, each of which becomes its own token.
    boundary = (
        fr"((?<!{stacked_marker})[{consonants}](?![{asat}{stacked_marker}])"
        fr"|[{latin_and_digits}{other_chars}])"
    )
    marked = re.sub(boundary, r' \1', text)
    return marked.split()
32
+
def generate_ngrams(tokens, n):
    """Return the list of all space-joined runs of *n* consecutive tokens."""
    grams = []
    for start in range(len(tokens) - n + 1):
        grams.append(' '.join(tokens[start:start + n]))
    return grams
35
+
def generate_char_ngrams(text, n):
    """Return every contiguous substring of *text* of length *n*, in order."""
    ngrams = []
    for start in range(len(text) - n + 1):
        ngrams.append(text[start:start + n])
    return ngrams
38
+
def train_naive_bayes(corpus, n=3, k=0.01, use_syllables=True):
    """Build an add-k (Laplace) smoothed n-gram probability profile.

    Parameters:
        corpus: raw text to train on.
        n: n-gram order.
        k: smoothing constant added to every observed count.
        use_syllables: if True, syllable-segment the corpus and build
            syllable n-grams; otherwise use raw character n-grams.

    Returns a dict mapping each observed n-gram to its smoothed probability.
    Note: only observed n-grams appear in the result; unseen n-grams are
    handled at lookup time by the backoff strategy.
    """
    if use_syllables:
        grams = generate_ngrams(sylbreak(corpus), n)
    else:
        grams = generate_char_ngrams(corpus, n)

    counts = Counter(grams)
    # Denominator: total n-gram count plus k mass for each vocabulary entry.
    denominator = len(grams) + len(counts) * k
    return {gram: (freq + k) / denominator for gram, freq in counts.items()}
51
+
def backoff_strategy(ngram, profile):
    """Look up *ngram* in *profile* with left-truncation backoff.

    If the full n-gram is absent, drop the leading token and retry with the
    shorter suffix, repeating until a match is found.  When even the final
    single token is unknown, return a small floor probability so callers
    can safely take its logarithm.
    """
    parts = ngram.split()
    while parts:
        candidate = ' '.join(parts)
        if candidate in profile:
            return profile[candidate]
        parts = parts[1:]
    return 1e-5  # floor probability for completely unseen n-grams
62
+
def detect_language_naive_bayes(text, profiles_folder, n=3, use_syllables=True, verbose=False):
    """Classify *text* against every language profile in *profiles_folder*.

    Each profile is a tab-separated file of ``ngram<TAB>probability`` lines;
    the language name is the profile file name without its extension.

    Parameters:
        text: input string to classify.
        profiles_folder: directory holding one profile file per language.
        n: n-gram order (must match the order used to build the profiles).
        use_syllables: if True, syllable-segment the text and score syllable
            n-grams; otherwise score raw character n-grams.
        verbose: print a warning for each malformed profile line.

    Returns a dict mapping language name -> normalized probability.

    Raises:
        ValueError: when the folder contains no usable profiles, or the
            text is too short to yield a single n-gram.
    """
    from math import exp  # scores are natural-log sums, so normalize with e

    # --- load every language profile ------------------------------------
    profiles = {}
    for profile_file in os.listdir(profiles_folder):
        profile_path = os.path.join(profiles_folder, profile_file)
        if not os.path.isfile(profile_path):
            continue  # skip sub-directories and other non-file entries
        profile_data = {}
        with open(profile_path, 'r', encoding='utf-8') as f:
            for line_no, line in enumerate(f, 1):
                parts = line.strip().split('\t')
                if len(parts) != 2:
                    if verbose:
                        print(f"Warning: Malformed line #{line_no} in {profile_path}. Skipping.")
                    continue
                ngram, prob = parts
                profile_data[ngram] = float(prob)
        profiles[os.path.splitext(profile_file)[0]] = profile_data

    if not profiles:
        # Previously this fell through to an opaque `max() arg is an empty
        # sequence` error; fail with an explicit message instead.
        raise ValueError(f"No language profiles found in {profiles_folder!r}")

    # --- n-grams of the input text ---------------------------------------
    if use_syllables:
        ngrams_text = Counter(generate_ngrams(sylbreak(text), n))
    else:
        ngrams_text = Counter(generate_char_ngrams(text, n))

    if not ngrams_text:
        raise ValueError(f"Input text is too short to produce any {n}-gram")

    # --- naive-Bayes log-likelihood per language -------------------------
    scores = defaultdict(float)
    for language, profile in profiles.items():
        for ngram, count in ngrams_text.items():
            scores[language] += count * log(backoff_strategy(ngram, profile))

    # --- convert log scores into a probability distribution --------------
    # Subtract the max before exponentiating for numerical stability.
    # BUG FIX: the scores are sums of *natural* logarithms, so they must be
    # exponentiated with e (math.exp), not with 2 as before — `2 ** x`
    # silently distorted the normalized probabilities (ranking unchanged).
    max_score = max(scores.values())
    exp_scores = {lang: exp(score - max_score) for lang, score in scores.items()}
    total_score = sum(exp_scores.values())
    return {language: score / total_score for language, score in exp_scores.items()}
98
+
99
+
100
+
char_syl_ngram/ref/language_detection.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
Reference implementation of syllable-based language detection.

This version first segments the text into syllables before building the
n-gram profile; the syllable n-grams are constructed only afterwards.
(Translated from the original Burmese note.)
"""

import os
import sys
import argparse
from collections import defaultdict, Counter
from math import log
import re
+
def sylbreak(text):
    """Tokenize *text* into Myanmar syllables.

    Latin letters, digits and the listed "other" characters each become a
    single-character token.  Works by prefixing every syllable start with a
    space and splitting the result.
    """
    base_consonants = r"က-အၵၶꧠၚၸꩡၡၛၺꧣၻၼၽၾၿၥဿႁၜၝၯၦဢဧဨဩ"
    ascii_chars = r"a-zA-Z0-9"
    subscript = r"္"
    killer = r"်"
    independents = r"ဣဤဥဦဧဩဪဿ၌၍၎၏၀-၉႐-႙၊။!-\/:-\@\[-`’“”{-~\s…"

    # Syllable boundary: an un-stacked, un-killed consonant, or any
    # ASCII/independent character.
    start_of_syllable = fr"((?<!{subscript})[{base_consonants}](?![{killer}{subscript}])|[{ascii_chars}{independents}])"
    return re.sub(start_of_syllable, r' \1', text).split()
27
+
28
+
def generate_ngrams(tokens, n):
    """Return every run of *n* consecutive tokens, joined by single spaces."""
    result = []
    i = 0
    while i + n <= len(tokens):
        result.append(' '.join(tokens[i:i + n]))
        i += 1
    return result
31
+
32
+
def train_naive_bayes(corpus, n=3, k=0.01):
    """Build an add-k (Laplace) smoothed n-gram probability profile.

    *corpus* is a pre-tokenized list of syllables (the caller runs
    ``sylbreak`` first in this reference version).  Returns a dict mapping
    each observed n-gram to its smoothed probability; unseen n-grams are
    handled later by the backoff strategy.
    """
    grams = generate_ngrams(corpus, n)
    counts = Counter(grams)

    # Add k to every observed count; denominator gains k mass per
    # vocabulary entry so the probabilities stay normalized.
    denominator = len(grams) + len(counts) * k
    return {gram: (freq + k) / denominator for gram, freq in counts.items()}
43
+
def backoff_strategy(ngram, profile):
    """Probability lookup with left-truncation backoff.

    Repeatedly drops the leading token of *ngram* until the remaining
    suffix is present in *profile*; returns a small floor probability when
    nothing matches so the caller can always take a logarithm.
    """
    remaining = ngram.split()
    candidate = ' '.join(remaining)
    while remaining and candidate not in profile:
        del remaining[0]
        candidate = ' '.join(remaining)
    if candidate == '':
        return 1e-5  # unseen even after backing off to a single token
    return profile[candidate]
54
+
def detect_language_naive_bayes(text, profiles_folder, n=3, verbose=False):
    """Score pre-tokenized *text* (a syllable list) against every language
    profile in *profiles_folder* and return a normalized probability per
    language.

    Each profile file holds tab-separated ``ngram<TAB>probability`` lines;
    the language name is the file name without extension.

    NOTE(review): `scores` sums *natural* logs but normalization uses
    `2 ** (...)` — mixing bases distorts the reported probabilities
    (ranking is unaffected); confirm whether `math.exp` was intended.
    NOTE(review): an empty profiles folder (or text too short for any
    n-gram) makes `max()` raise an opaque ValueError — verify callers.
    """
    profiles = {}
    for profile_file in os.listdir(profiles_folder):
        profile_path = os.path.join(profiles_folder, profile_file)
        with open(profile_path, 'r', encoding='utf-8') as f:
            profile_data = {}
            # Parse one 'ngram<TAB>probability' pair per line; malformed
            # lines are skipped (warned about only when verbose).
            for line_no, line in enumerate(f, 1):
                parts = line.strip().split('\t')
                if len(parts) != 2:
                    if verbose:
                        print(f"Warning: Malformed line #{line_no} in {profile_path}. Skipping.")
                    continue
                ngram, prob = parts
                profile_data[ngram] = float(prob)
        profiles[os.path.splitext(profile_file)[0]] = profile_data

    scores = defaultdict(float)
    ngrams_text = Counter(generate_ngrams(text, n))
    #print(list(ngrams_text.items())[:5]) # print first 5 n-grams of input text

    # Naive-Bayes log-likelihood: sum count-weighted log-probabilities of
    # the text's n-grams under each language profile (with backoff lookup).
    for language, profile in profiles.items():
        #print(language, list(profile.items())[:5]) # print first 5 n-grams for each language, 4debug

        for ngram, count in ngrams_text.items():
            prob = backoff_strategy(ngram, profile)
            scores[language] += count * log(prob)

    # Normalize scores (max subtracted first for numerical stability).
    max_score = max(scores.values())
    exp_scores = {lang: 2 ** (score - max_score) for lang, score in scores.items()}
    total_score = sum(exp_scores.values())
    probabilities = {language: score / total_score for language, score in exp_scores.items()}

    return probabilities
90
+
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Naive Bayes based language detection and profile creation.')
    parser.add_argument('-i', '--input', type=str, required=True, help='Input file path.')
    parser.add_argument('-o', '--output', type=str, help='Output file path (only for profile creation).', default=None)
    parser.add_argument('-c', '--create_profile', action='store_true', help='Flag to create profile.')
    parser.add_argument('-d', '--detect', action='store_true', help='Flag to detect language.')
    parser.add_argument('-p', '--profile_folder', type=str, help='Folder path containing language profiles (only for detection).', default=None)
    parser.add_argument('-n', '--ngram', type=int, help='Maximum n-gram value (default: 3).', default=3)
    parser.add_argument('-v', '--verbose', action='store_true', help='Display warning messages.')

    args = parser.parse_args()

    # Read the input once, drop ordinary spaces, then syllable-segment it;
    # both profile creation and detection operate on the syllable list.
    # (Fixed: stray trailing semicolon after read(), and the file handle no
    # longer reuses the name `file`.)
    with open(args.input, 'r', encoding='utf-8') as infile:
        raw_text = infile.read()
    text = sylbreak(raw_text.replace(' ', ''))

    if args.create_profile:
        profile = train_naive_bayes(text, args.ngram)
        # Emit 'ngram<TAB>probability' lines to the output file, or to
        # stdout when no output path was given.
        if args.output:
            with open(args.output, 'w', encoding='utf-8') as outfile:
                for ngram, prob in profile.items():
                    outfile.write(f"{ngram}\t{prob}\n")
        else:
            for ngram, prob in profile.items():
                sys.stdout.write(f"{ngram}\t{prob}\n")

    elif args.detect:
        if not args.profile_folder:
            print("Please provide a profiles folder for detection using -p or --profile_folder!")
            sys.exit(1)
        probabilities = detect_language_naive_bayes(text, args.profile_folder, args.ngram, args.verbose)
        for lang, prob in probabilities.items():
            print(f"{lang}: {prob*100:.2f}%")

    else:
        print("Please choose an operation: create profile (-c) or detect language (-d).")
128
+