added char_syl_freq/
Browse files
char_syl_freq/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# __init__.py in char_syl_freq directory
|
| 2 |
+
from .char_syl_freq import break_syllables, create_frequency_profile, save_profile, load_profiles, detect_language
|
| 3 |
+
|
char_syl_freq/__pycache__/__init__.cpython-38.pyc
ADDED
|
Binary file (308 Bytes). View file
|
|
|
char_syl_freq/__pycache__/char_syl_freq.cpython-38.pyc
ADDED
|
Binary file (3.11 kB). View file
|
|
|
char_syl_freq/char_syl_freq.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Approach: Language detection based on character and syllable frequency profiles
|
| 3 |
+
|
| 4 |
+
The character score is calculated by summing the products of the frequency of each character in the input text and its frequency in the language profile. If a character is not found in the profile, its frequency is assumed to be zero.
|
| 5 |
+
The syllable score is calculated in a similar way, using the frequencies of syllables.
|
| 6 |
+
The character score and syllable score for each language are then combined to form a total score.
|
| 7 |
+
After calculating the combined scores for all languages, the script identifies the language with the highest score as the detected language.
|
| 8 |
+
|
| 9 |
+
Language profile format: JSON
|
| 10 |
+
|
| 11 |
+
Written by Ye Kyaw Thu, LU Lab., Myanmar
|
| 12 |
+
Last updated: 31 Jan 2024
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
import argparse
|
| 16 |
+
import os
|
| 17 |
+
import json
|
| 18 |
+
import re
|
| 19 |
+
import sys
|
| 20 |
+
from collections import Counter
|
| 21 |
+
|
| 22 |
+
# Character classes and the compiled pattern used for Myanmar syllable
# segmentation.  A separator is inserted before every consonant that
# starts a new syllable, and before every English/number/other symbol.
my_consonant = "က-အ"
en_char = "a-zA-Z0-9"
other_char = "ဣဤဥဦဧဩဪဿ၌၍၏၀-၉၊။!-/:-@[-`{-~\s"
subscript_symbol = '္'
a_that = '်'

# A break point is either:
#   * a Myanmar consonant that is not stacked (no subscript before it)
#     and is not killed/stacked itself (no a_that or subscript after it), or
#   * any English letter, digit, or other standalone symbol.
break_pattern = re.compile(
    f"((?<!{subscript_symbol})[{my_consonant}]"
    f"(?![{a_that}{subscript_symbol}])"
    f"|[{en_char}{other_char}])"
)

def break_syllables(text, separator='|'):
    """Segment *text* into syllables and return them as a list.

    Whitespace runs are first collapsed to single spaces; a marker is
    then inserted in front of every syllable-initial character and the
    text is split on that marker.
    """
    collapsed = re.sub(r'\s+', ' ', text.strip())
    marked = break_pattern.sub(separator + r"\1", collapsed)
    # The very first syllable also receives a marker; drop it so the
    # result does not begin with an empty string.
    if marked[:len(separator)] == separator:
        marked = marked[len(separator):]
    # A marker immediately after a space is redundant — the space
    # already separates syllables.
    marked = marked.replace(separator + " ", " ")
    return marked.split(separator)
|
| 41 |
+
|
| 42 |
+
def create_frequency_profile(text):
    """Build character- and syllable-frequency profiles for *text*.

    Args:
        text: Raw input text; syllables are obtained with
            ``break_syllables``.

    Returns:
        dict with two keys:
            'char_freq': {character: relative frequency in [0, 1]}
            'syl_freq':  {syllable:  relative frequency in [0, 1]}
        Both dicts are empty for empty input (the original raised
        ZeroDivisionError on "").
    """
    # Character frequencies, normalized by the total character count.
    char_count = Counter(text)
    total_chars = sum(char_count.values())
    char_frequencies = (
        {char: count / total_chars for char, count in char_count.items()}
        if total_chars else {}
    )

    # Syllable frequencies, normalized by the total syllable count.
    # Guard on *text*: break_syllables("") returns [''] which would
    # otherwise put a bogus empty-string syllable in the profile.
    syllable_count = Counter(break_syllables(text)) if text else Counter()
    total_syllables = sum(syllable_count.values())
    syllable_frequencies = (
        {syl: count / total_syllables for syl, count in syllable_count.items()}
        if total_syllables else {}
    )

    return {'char_freq': char_frequencies, 'syl_freq': syllable_frequencies}
|
| 55 |
+
|
| 56 |
+
def save_profile(profile, output_path):
|
| 57 |
+
with open(output_path, 'w', encoding='utf-8') as file:
|
| 58 |
+
json.dump(profile, file)
|
| 59 |
+
|
| 60 |
+
def load_profiles(profile_folder):
    """Load every JSON profile found directly inside *profile_folder*.

    Args:
        profile_folder: Directory containing profile JSON files as
            written by ``save_profile``.

    Returns:
        dict mapping file name -> parsed profile dict.

    Non-file entries are skipped; previously a subdirectory such as
    ``__pycache__`` made ``open()`` raise IsADirectoryError.
    """
    profiles = {}
    for profile_file in os.listdir(profile_folder):
        path = os.path.join(profile_folder, profile_file)
        if not os.path.isfile(path):
            continue  # skip directories and other non-regular entries
        with open(path, 'r', encoding='utf-8') as file:
            profiles[profile_file] = json.load(file)
    return profiles
|
| 66 |
+
|
| 67 |
+
def detect_language(text, profiles):
    """Return the name of the profile whose frequencies best match *text*.

    Args:
        text: Input text to classify; must be non-empty.
        profiles: dict mapping language name -> profile dict with
            'char_freq' and 'syl_freq' keys, as produced by
            ``create_frequency_profile`` / ``load_profiles``.

    Returns:
        The key of *profiles* with the highest combined score.

    Raises:
        ValueError: if *text* or *profiles* is empty (the original
            failed with ZeroDivisionError / a bare max() ValueError).
    """
    if not text:
        raise ValueError("cannot detect the language of empty text")
    if not profiles:
        raise ValueError("no language profiles supplied")

    # Relative character frequencies of the input text.
    char_freq = {char: count / len(text)
                 for char, count in Counter(text).items()}
    # Segment once and reuse — the original re-segmented the whole text
    # twice (once for len(), once for Counter()).
    syllables = break_syllables(text)
    syl_freq = {syl: count / len(syllables)
                for syl, count in Counter(syllables).items()}

    scores = {}
    for language, profile in profiles.items():
        # Dot product of input frequencies with profile frequencies;
        # characters/syllables absent from the profile contribute 0.
        char_score = sum(freq * profile['char_freq'].get(char, 0)
                         for char, freq in char_freq.items())
        syl_score = sum(freq * profile['syl_freq'].get(syl, 0)
                        for syl, freq in syl_freq.items())
        # Equal weighting of the two scores (adjust as needed).
        scores[language] = (char_score + syl_score) / 2

    return max(scores, key=scores.get)
|
| 83 |
+
|
| 84 |
+
|