ye-nlp commited on
Commit
0d84e21
·
1 Parent(s): 3168ad8

added shell scripts etc.

Browse files
README.txt ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # For your information
2
+
3
+ Written by Ye Kyaw Thu, LU Lab., Myanmar
4
+ Last updated: 31 Jan 2024
5
+
6
+ Filename: exp.sh
7
+ This shell script builds all language profiles and performs detection. It demonstrates how to build and detect languages using all the detection approaches I have implemented.
8
+ Note: Completing this process will take some time.
9
+
10
+ ## Approach: Character+Syllable Frequency
11
+
12
+ build_profile_with_char_syl_freq.sh
13
+ detect_with_char_syl_freq.sh
14
+
15
+ ## Approach: Character+Syllable Ngram with Bayes
16
+
17
+ build_profile_with_char_syl_ngram.sh
18
+ detect_with_char_syl_ngram.sh
19
+
20
+ ## Approach: Word2Vec, FastText Embedding
21
+
22
+ build_profile_with_embeddings.sh
23
+ detect_with_embedding.sh
24
+
25
+ ## Approach: FastText Classifier
26
+ train_with_fasttext_classifier.sh
27
+ detect_with_fasttext.sh
28
+
29
+ ## Approach: Neural Network Modeling
30
+
31
+ train_with_nerual.sh
32
+ detect_with_neural.sh
33
+
34
+ ## Folder Information
35
+
36
+ (base) ye@lst-gpu-3090:~/exp/myNLP/lang_detect$ tree . -d -L 1
37
+ .
38
+ ├── char_syl_freq
39
+ ├── char_syl_ngram
40
+ ├── data
41
+ ├── embedding
42
+ ├── fasttext_class
43
+ ├── log
44
+ ├── neural
45
+ ├── preprocess
46
+ ├── profile
47
+ ├── tmp
48
+ └── tool
49
+
50
+ 11 directories
51
+
52
+ Here,
53
+ - The 'char_syl_freq/' folder contains the 'char_syl_freq' module.
54
+ - The 'char_syl_ngram/' folder contains the 'char_syl_ngram' module.
55
+ - The 'data/' folder holds the data used for building Myanmar language profiles and for detection.
56
+ - The 'embedding/' folder includes modules for word embeddings (e.g., word2vec, fasttext).
57
+ - The 'fasttext_class/' folder contains the FastText classification module.
58
+ - The 'log/' folder stores log files for building and detecting using all six approaches.
59
+ - The 'neural/' folder contains the neural network-based language detection module.
60
+ - The 'preprocess/' folder includes various preprocessing scripts.
61
+ - The 'profile/' folder holds built language profiles for Bamar (Myanmar language), Beik, Dawei, Mon, Pao, Po Kayin, Rakhine, Sgaw Kayin, and Shan.
62
+
63
+
build_profile_with_char_syl_freq.sh ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#
# Build combined character+syllable frequency language profiles.
# For every <language>.raw file under data/raw, runs demo_usage.py in
# train mode and writes <language>_combined_profile.json into the
# profile directory.  Stops with a non-zero status on the first failure.

# Define the base directory and the Python script
BASE_DIR="$HOME/exp/myNLP/lang_detect"
PYTHON_SCRIPT="$BASE_DIR/demo_usage.py"
TEXT_DIR="$BASE_DIR/data/raw" # Assuming you have raw text files for training
PROFILE_DIR="$BASE_DIR/profile/char_syl_freq_profile"

# Create the profile directory if it doesn't exist
mkdir -p "$PROFILE_DIR"

# Loop through each text file in the text_files directory
for file in "$TEXT_DIR"/*.raw; do
    # Fix: with no matching files the glob stays literal and the script
    # would abort with a spurious "error creating profile" — skip instead.
    [[ -e "$file" ]] || continue

    # Extract the language name from the filename (text up to first dot).
    filename=$(basename -- "$file")
    language=${filename%%.*}

    # Define the output profile filename
    output_profile="$PROFILE_DIR/${language}_combined_profile.json"

    # Run the Python script to create the profile
    if python3 "$PYTHON_SCRIPT" --mode train --input "$file" --output "$output_profile" --approach char_syl_freq; then
        echo "Created combined character and syllable language profile for $language."
    else
        echo "Error in creating profile for $language. Check the input file and script."
        exit 1
    fi
done

echo "All language profiles have been created."
build_profile_with_char_syl_ngram.sh ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#
# Build character+syllable n-gram (Naive Bayes) language profiles.
# For each n in NGRAM_VALUES, every <language>.raw file under data/raw is
# turned into profile/<n>gram_profile/<language>.<n>gram.
# Stops with a non-zero status on the first failure.

# Define the base directory and the Python script
BASE_DIR="$HOME/exp/myNLP/lang_detect/"
PYTHON_SCRIPT="$BASE_DIR/demo_usage.py"
TEXT_DIR="$BASE_DIR/data/raw" # Assuming you have raw text files for training
OUTPUT_BASE_DIR="$BASE_DIR/profile/" # Base directory for output profiles

# Define the n-gram values to iterate through
NGRAM_VALUES=(3 4 5)

# Loop through each n-gram value
for ngram_value in "${NGRAM_VALUES[@]}"; do
    # Create the output directory for this n-gram value
    OUTPUT_DIR="$OUTPUT_BASE_DIR/${ngram_value}gram_profile"
    mkdir -p "$OUTPUT_DIR"

    # Loop through each text file in the text_files directory
    for file in "$TEXT_DIR"/*.raw; do
        # Fix: skip the literal glob pattern when no *.raw files exist,
        # otherwise the script aborts with a spurious error.
        [[ -e "$file" ]] || continue

        # Extract the language name from the filename (text up to first dot).
        filename=$(basename -- "$file")
        language=${filename%%.*}

        # Define the output profile filename based on n-gram value
        output_profile="$OUTPUT_DIR/${language}.${ngram_value}gram"

        # Run the Python script to create the profile
        if python3 "$PYTHON_SCRIPT" --mode train --ngram "$ngram_value" --input "$file" --output "$output_profile" --approach char_syl_ngram; then
            echo "Created ${ngram_value}-gram language profile for $language."
        else
            echo "Error in creating ${ngram_value}-gram profile for $language. Check the input file and script."
            exit 1
        fi
    done
done

echo "All language profiles have been created."
build_profile_with_embeddings.sh ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#
# Train Word2Vec and FastText embedding models for language detection.
# For every <language>.syl file under data/syl_seg, two models are built:
# profile/word2vec/<language>_word2vec.model and
# profile/fasttext/<language>_fasttext.model.
# Stops with a non-zero status on the first training failure.

# Define the base directory and the Python script
BASE_DIR="$HOME/exp/myNLP/lang_detect/"
PYTHON_SCRIPT="$BASE_DIR/demo_usage.py"
SYL_SEG_DIR="$BASE_DIR/data/syl_seg"
WORD2VEC_DIR="$BASE_DIR/profile/word2vec"
FASTTEXT_DIR="$BASE_DIR/profile/fasttext"

# Create directories for the models if they don't exist
mkdir -p "$WORD2VEC_DIR"
mkdir -p "$FASTTEXT_DIR"

# Loop through each .syl file in the syl_seg directory
for file in "$SYL_SEG_DIR"/*.syl; do
    # Fix: skip the literal glob pattern when no *.syl files exist,
    # otherwise the trainer is invoked on a non-existent path.
    [[ -e "$file" ]] || continue

    # Extract the language name from the filename (text up to first dot).
    filename=$(basename -- "$file")
    language=${filename%%.*}

    # Define the output model filenames
    word2vec_output="$WORD2VEC_DIR/${language}_word2vec.model"
    fasttext_output="$FASTTEXT_DIR/${language}_fasttext.model"

    # Train Word2Vec model.
    # Fix: failures were previously ignored; now abort like the sibling
    # build scripts so a broken run is not reported as success.
    if python3 "$PYTHON_SCRIPT" --mode train --approach word2vec_embedding --input "$file" --output "$word2vec_output"; then
        echo "Word2Vec model for $language saved to $word2vec_output"
    else
        echo "Error training Word2Vec model for $language."
        exit 1
    fi

    # Train FastText model
    if python3 "$PYTHON_SCRIPT" --mode train --approach fasttext_embedding --input "$file" --output "$fasttext_output"; then
        echo "FastText model for $language saved to $fasttext_output"
    else
        echo "Error training FastText model for $language."
        exit 1
    fi
done

echo "Training completed for all languages."
demo_usage.py ADDED
@@ -0,0 +1,232 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
Demo Usage of myZagar language detection library.
Written by Ye Kyaw Thu, LU Lab., Myanmar
Last updated: 31 Jan 2024

Usage: python ./demo_usage.py --help
"""


import argparse
import os
import sys
from collections import defaultdict, Counter
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder

from char_syl_freq import create_frequency_profile, detect_language as detect_language_freq, save_profile as save_freq_profile, load_profiles as load_freq_profiles, break_syllables
from char_syl_ngram import train_naive_bayes, detect_language_naive_bayes, sylbreak
from embedding import train_embeddings, save_model as save_embed_model, load_model as load_embed_model, detect_language as detect_language_embed
from fasttext_class import train_model as fasttext_class_train_model, test_model as fasttext_class_test_model, predict_language
from neural import load_data as load_neural_data, preprocess_data as neural_preprocess_data, build_model as build_neural_model, train_model as train_neural_model, save_model_and_artifacts, load_model_and_artifacts, detect_language as detect_language_neural

def main():
    """Command-line entry point.

    Parses the CLI arguments, normalises --input (a file path or a
    literal text string) into raw_text, and dispatches to one of the six
    approaches in train/detect mode ('predict' exists only for the
    FastText classification model).
    """
    parser = argparse.ArgumentParser(description='Language detection based on character, n-gram frequency analysis, embeddings.')

    # Add approach argument to select the approach
    parser.add_argument('--approach', choices=['char_syl_freq', 'char_syl_ngram', 'word2vec_embedding', 'fasttext_embedding', 'fasttext_class', 'neural_network'], required=True, help='Select the approach: char_syl_freq or char_syl_ngram or word2vec_embedding or fasttext_embedding')
    parser.add_argument('--mode', choices=['train', 'detect', 'predict'], required=True, help='Mode of operation: build or detect or predict, predict is only for FastText Classification Model')
    parser.add_argument('--input', type=str, required=True, help='Input file path.')
    parser.add_argument('--output', type=str, help='Output file path (only for profile creation).', default=None)
    parser.add_argument('--profiles', type=str, help='Folder path containing saved frequency profiles (only for detection).', default=None)
    parser.add_argument('--ngram', type=int, help='Maximum n-gram value (default: 3).', default=3)
    parser.add_argument('--size', type=int, default=100, help='Dimension of the embeddings (default: 100)')
    parser.add_argument('--window', type=int, default=5, help='Maximum distance between the current and predicted word (default: 5), for embeddings')
    parser.add_argument('--min_count', type=int, default=2, help='Ignores all words with total frequency lower than this (default: 2), for embeddings')
    parser.add_argument('--epoch', type=int, default=25, help='Number of epochs for training (default: 25), for FastText Classification and Neural Network approaches')
    parser.add_argument('--lr', type=float, default=1.0, help='Learning rate for training (default: 1.0), for FastText Classification and Neural Network approaches')
    parser.add_argument('--wordNgrams', type=int, default=2, help='Max length of word ngram (default: 2)')
    parser.add_argument('--num_words', type=int, default=10000, help='Number of words to consider from the dataset (default: 10000)')
    parser.add_argument('--max_len', type=int, default=100, help='Maximum length of the sequences (default: 100)')
    parser.add_argument('--batch_size', type=int, default=32, help='Batch size for training (default: 32)')
    parser.add_argument('--verbose', action='store_true', help='Display warning messages., for FastText Classification')

    args = parser.parse_args()

    # --input may be either a path to a UTF-8 text file or a literal string.
    input_data = args.input
    if os.path.isfile(input_data):
        with open(input_data, 'r', encoding='utf-8') as file:
            raw_text = file.read()
    else:
        raw_text = input_data

    if args.approach == 'char_syl_freq':
        if args.mode == 'train':
            if not args.input or not args.output or not args.approach:
                print("For training, both --input, --approach and --output arguments are required.")
            else:
                frequency_profile = create_frequency_profile(raw_text)
                save_freq_profile(frequency_profile, args.output)
                print(f"Frequency profile saved to {args.output}")

        elif args.mode == 'detect':
            if not args.input or not args.profiles:
                print("For detection, both --input and --profiles arguments are required.")
            else:
                profiles = load_freq_profiles(args.profiles)
                detected_language = detect_language_freq(raw_text, profiles)
                print(f"Detected language: {detected_language}")

    elif args.approach == 'char_syl_ngram':
        if args.mode == 'train':
            # Build both a character profile and a syllable profile, then
            # merge them into a single combined n-gram profile.
            if not args.input or not args.output:
                print("For training, both --input and --output arguments are required.")
            else:
                char_profile = train_naive_bayes(raw_text, args.ngram, use_syllables=False)
                syl_profile = train_naive_bayes(raw_text, args.ngram, use_syllables=True)
                combined_profile = {**char_profile, **syl_profile}  # Combine both profiles

                # Write one "<ngram>\t<prob>" line per entry (file or stdout).
                if args.output:
                    with open(args.output, 'w', encoding='utf-8') as file:
                        for ngram, prob in combined_profile.items():
                            file.write(f"{ngram}\t{prob}\n")
                else:
                    for ngram, prob in combined_profile.items():
                        sys.stdout.write(f"{ngram}\t{prob}\n")

        elif args.mode == 'detect':

            if not args.profiles:
                print("Please provide a profiles folder for detection using -p or --profile_folder!")
                sys.exit(1)

            # Character-based detection
            char_probabilities = detect_language_naive_bayes(raw_text, args.profiles, args.ngram, use_syllables=False, verbose=args.verbose)
            print("Character-based Detection:")
            for lang, prob in char_probabilities.items():
                print(f"{lang}: {prob*100:.2f}%")

            # Syllable-based detection
            syl_probabilities = detect_language_naive_bayes(raw_text, args.profiles, args.ngram, use_syllables=True, verbose=args.verbose)
            print("\nSyllable-based Detection:")
            for lang, prob in syl_probabilities.items():
                print(f"{lang}: {prob*100:.2f}%")

            # Combined detection: simple average of the two probabilities.
            print("\nCombined Character and Syllable-based Detection:")
            combined_scores = defaultdict(float)
            for lang in char_probabilities:
                combined_scores[lang] = (char_probabilities[lang] + syl_probabilities[lang]) / 2
            for lang, score in combined_scores.items():
                print(f"{lang}: {score*100:.2f}%")

    elif args.approach == 'word2vec_embedding':

        method = "word2vec"
        if args.mode == 'train':
            if not args.input or not args.output or not args.approach:
                print("For training, both --input, --approach and --output arguments are required.")
            else:
                model = train_embeddings(args.input, method, args.size, args.window, args.min_count)
                save_embed_model(model, args.output)
                print(f"Model saved to {args.output}")
        elif args.mode == 'detect':
            if not args.input or not args.approach or not args.profiles:
                print("For detection, both --input, --approach and --profiles arguments are required.")
            else:
                # Fix: the argparse namespace only defines 'profiles';
                # the original read the non-existent args.profile and
                # raised AttributeError.  One model is loaded per
                # '<language>.model' file in the folder.
                models = {fname.split('.')[0]: load_embed_model(os.path.join(args.profiles, fname))
                          for fname in os.listdir(args.profiles) if fname.endswith('.model')}

                # raw_text already holds the file contents or the literal
                # input string (normalised above).
                # Fix: call the imported alias detect_language_embed —
                # plain detect_language is not defined in this module.
                detected_language = detect_language_embed(raw_text, models)
                print(f"Detected language: {detected_language}")

    elif args.approach == 'fasttext_embedding':

        method = "fasttext"
        if args.mode == 'train':
            if not args.input or not args.output or not args.approach:
                print("For training, both --input, --approach and --output arguments are required.")
            else:
                model = train_embeddings(args.input, method, args.size, args.window, args.min_count)
                save_embed_model(model, args.output)
                print(f"Model saved to {args.output}")

        elif args.mode == 'detect':
            # Fix: there is no --model_folder argument; the models folder
            # is passed via --profiles, mirroring the word2vec branch.
            if not args.input or not args.profiles or not args.approach:
                print("For detection, both --input, --approach and --profiles arguments are required.")
            else:
                models = {fname.split('.')[0]: load_embed_model(os.path.join(args.profiles, fname))
                          for fname in os.listdir(args.profiles) if fname.endswith('.model')}

                # Fix: detect_language_embed, as above.
                detected_language = detect_language_embed(raw_text, models)
                print(f"Detected language: {detected_language}")

    elif args.approach == 'fasttext_class':
        if args.mode == 'train':
            if not args.input or not args.output or not args.approach:
                print("For training, both --input, --approach and --output arguments are required.")
            else:
                fasttext_class_train_model(args.input, args.output, args.epoch, args.lr, args.wordNgrams)

        elif args.mode == 'detect':
            if not args.profiles or not args.input:
                print("For testing, both --profiles and --input arguments are required.")
            else:
                fasttext_class_test_model(args.profiles, args.input)

        elif args.mode == 'predict':
            if not args.profiles or not args.input:
                print("For prediction, both --profiles and --input arguments are required.")
            else:
                # File input: predict_language prints per-line results itself;
                # string input: print the single returned prediction.
                if os.path.isfile(args.input):
                    predict_language(args.profiles, args.input, is_file=True)
                else:
                    prediction = predict_language(args.profiles, args.input)
                    print(f"Predicted language: {prediction}")

    elif args.approach == 'neural_network':
        if args.mode == 'train':
            if not args.input or not args.output or not args.approach:
                print("For training, both --input, --approach and --output arguments are required.")
                sys.exit(1)

            # Load, tokenise/encode, and hold out 20% for validation.
            texts, labels = load_neural_data(args.input)
            X, y, tokenizer, label_encoder = neural_preprocess_data(texts, labels, args.num_words, args.max_len)
            X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)

            model = build_neural_model(args.max_len, len(label_encoder.classes_), args.num_words)
            train_neural_model(model, X_train, y_train, X_val, y_val, args.epoch, args.batch_size)
            save_model_and_artifacts(model, tokenizer, label_encoder, args.output)
            print(f"Model and artifacts saved to {args.output}")

        elif args.mode == 'detect':
            if not args.input or not args.profiles:
                print("For detection, both --input, --approach and --profiles arguments are required.")
                sys.exit(1)

            model, tokenizer, label_encoder = load_model_and_artifacts(args.profiles)

            if os.path.isfile(args.input):
                with open(args.input, 'r', encoding='utf-8') as file:
                    text = file.read().strip()
            else:
                text = args.input.strip()

            detected_language = detect_language_neural(text, model, tokenizer, label_encoder, args.max_len)
            print(f"Detected language: {detected_language}")

    else:
        # Unreachable in practice: argparse 'choices' already rejects
        # unknown approaches.  (Fixed the missing closing quote.)
        print("Invalid approach. Please choose either 'char_syl_freq' or 'char_syl_ngram' or 'word2vec_embedding' or 'fasttext_embedding' or 'fasttext_class' or 'neural_network'.")

if __name__ == '__main__':
    main()
detect_with_char_syl_freq.sh ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#
# Run character+syllable frequency detection over the example inputs.
# Each *.raw file is classified as a whole, then 10 random sentences
# drawn from it are classified individually.

# Define directories
BASE_DIR="$HOME/exp/myNLP/lang_detect/"
PYTHON_SCRIPT="$BASE_DIR/demo_usage.py"
INPUT_DIR="$BASE_DIR/data/eg_input_raw"
PROFILE_DIR="$BASE_DIR/profile/char_syl_freq_profile"

# Number of random sentences to test
NUM_RANDOM_SENTENCES=10

# Loop through each input file in the directory
for input_file in "$INPUT_DIR"/*.raw; do
    # Fix: skip the literal glob pattern when no *.raw files exist.
    [[ -e "$input_file" ]] || continue

    echo "Processing file: $(basename "$input_file")"

    # Run detection on the entire file
    python "$PYTHON_SCRIPT" --input "$input_file" --mode detect --profiles "$PROFILE_DIR" --approach char_syl_freq

    # Extract and predict random sentences from the file
    for i in $(seq 1 "$NUM_RANDOM_SENTENCES"); do
        random_sentence=$(shuf -n 1 "$input_file")
        echo "Predicting random sentence $i: $random_sentence"
        python "$PYTHON_SCRIPT" --input "$random_sentence" --mode detect --profiles "$PROFILE_DIR" --approach char_syl_freq
    done

    echo ""
done

echo "All processing completed."
detect_with_char_syl_ngram.sh ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#
# Run character+syllable n-gram detection over the example inputs, once
# per n-gram profile (3/4/5-gram).  Each file is classified as a whole,
# then 10 random sentences drawn from it are classified individually.

# Define base directory and script location
# Fix: use $HOME like every sibling script instead of hard-coding /home/ye.
BASE_DIR="$HOME/exp/myNLP/lang_detect/"
PYTHON_SCRIPT="$BASE_DIR/demo_usage.py"
INPUT_DIR="$BASE_DIR/data/eg_input_raw"

# Number of ngrams and random sentences
NGRAMS=(3 4 5)
NUM_RANDOM_SENTENCES=10

# Loop through each input file
for input_file in "$INPUT_DIR"/*.raw; do
    # Fix: skip the literal glob pattern when no *.raw files exist.
    [[ -e "$input_file" ]] || continue

    echo "Processing file: $(basename "$input_file")"

    # Loop through each ngram
    for ngram in "${NGRAMS[@]}"; do
        PROFILE_DIR="$BASE_DIR/profile/${ngram}gram_profile"

        # Detection over the full file
        echo "Running with ngram=$ngram on full file"
        python "$PYTHON_SCRIPT" --mode detect --input "$input_file" --profiles "$PROFILE_DIR" --ngram "$ngram" --approach char_syl_ngram

        # Detection over random single sentences
        for i in $(seq 1 "$NUM_RANDOM_SENTENCES"); do
            random_sentence=$(shuf -n 1 "$input_file")
            echo "Running with ngram=$ngram on random sentence $i: $random_sentence"
            python "$PYTHON_SCRIPT" --mode detect --input "$random_sentence" --profiles "$PROFILE_DIR" --ngram "$ngram" --approach char_syl_ngram
        done
    done

    echo ""
done

echo "All processing completed."
detect_with_embedding.sh ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#
# Detect languages with the embedding-based models (Word2Vec, FastText).
# Every file in the example-input directory is classified as a whole,
# then 10 random sentences drawn from it are classified individually.
#
# NOTE(review): BASE_DIR/PYTHON_SCRIPT point at the older
# sylbreak4all/embed_lang_detect.py location, unlike the sibling scripts
# which drive demo_usage.py under ~/exp/myNLP/lang_detect — confirm
# which driver is intended.
BASE_DIR="$HOME/exp/sylbreak4all/lang_detection/embedding"
PYTHON_SCRIPT="$BASE_DIR/embed_lang_detect.py" # Replace with the name of your Python script
INPUT_DIR="$BASE_DIR/data/eg_input"
WORD2VEC_DIR="$BASE_DIR/word2vec"
FASTTEXT_DIR="$BASE_DIR/fasttext"

# run_detection MODEL_TYPE MODEL_DIR
#   Classifies every input file (and 10 random sentences per file) with
#   the models stored in MODEL_DIR, labelling output with MODEL_TYPE.
run_detection() {
    local model_type=$1
    local model_dir=$2
    echo "Running language detection using $model_type models..."
    for file in "$INPUT_DIR"/*; do
        # Fix: skip the literal glob pattern when the input dir is empty.
        [[ -e "$file" ]] || continue

        local filename
        filename=$(basename -- "$file")
        detected_language=$(python3 "$PYTHON_SCRIPT" --mode detect --input "$file" --model_folder "$model_dir")
        echo "File: $filename - Detected Language with $model_type: $detected_language"

        # Run detection on random sentences from the file, 10 times
        for i in {1..10}; do
            random_sentence=$(shuf -n 1 "$file")
            echo "Attempt $i - Random sentence from $filename: $random_sentence"
            detected_language_sentence=$(python3 "$PYTHON_SCRIPT" --mode detect --input "$random_sentence" --model_folder "$model_dir")
            echo "Detected Language with $model_type: $detected_language_sentence"
        done
        echo ""
    done
}

# Run detection using Word2Vec models
run_detection "Word2Vec" "$WORD2VEC_DIR"

# Run detection using FastText models
run_detection "FastText" "$FASTTEXT_DIR"

echo "Language detection completed for all files."
detect_with_fasttext.sh ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#
# Evaluate every trained FastText classification model
# (profile/fasttext_class/<n>gram.model.bin) on the shuffled test file,
# then predict 10 random sentences from it per model.

# Define directories
BASE_DIR="$HOME/exp/myNLP/lang_detect"
PYTHON_SCRIPT="$BASE_DIR/demo_usage.py"

# Define the test file
TEST_FILE="$BASE_DIR/data/all_test.fasttext.shuf"

# Number of random sentences to test
NUM_RANDOM_SENTENCES=10

# Loop through each model file
for model in "$BASE_DIR"/profile/fasttext_class/*gram.model.bin; do
    # Fix: skip the literal glob pattern when no models were trained yet.
    [[ -e "$model" ]] || continue

    echo "Processing with model: $(basename "$model")"

    # Test the model with the specific test file.
    # Fix: spell the flag out as --profiles (the name demo_usage.py
    # declares) rather than relying on argparse prefix abbreviation.
    echo "Testing with file: $(basename "$TEST_FILE") and model: $(basename "$model")"
    time python "$PYTHON_SCRIPT" --mode detect --approach fasttext_class \
        --profiles "$model" --input "$TEST_FILE"

    # Extract and predict random sentences from the test file
    for i in $(seq 1 "$NUM_RANDOM_SENTENCES"); do
        random_sentence=$(shuf -n 1 "$TEST_FILE")
        echo "Predicting random sentence $i: $random_sentence"
        python "$PYTHON_SCRIPT" --mode predict --approach fasttext_class \
            --profiles "$model" --input "$random_sentence"
    done

    echo ""
done

echo "All processing completed."
detect_with_neural.sh ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#
# Detect languages with the trained neural-network model.  Each *.txt
# file in the example-input directory is classified as a whole, then 10
# random sentences drawn from it are classified individually.

# Define base directory and Python script
BASE_DIR="$HOME/exp/myNLP/lang_detect"
PYTHON_SCRIPT="$BASE_DIR/demo_usage.py"

# Directory containing the language files
INPUT_DIR="$BASE_DIR/data/eg_input/"

# Directory of the trained model
MODEL_DIR="$BASE_DIR/profile/neural/"

# Number of random sentences to test
NUM_RANDOM_SENTENCES=10

# Loop through each .txt file in the input directory
for file in "$INPUT_DIR"*.txt; do
    # Fix: skip the literal glob pattern when no *.txt files exist.
    [[ -e "$file" ]] || continue

    echo "Processing file $file..."
    python "$PYTHON_SCRIPT" --mode detect --approach neural_network --input "$file" --profiles "$MODEL_DIR"

    # Extract and predict random sentences from the file
    for i in $(seq 1 "$NUM_RANDOM_SENTENCES"); do
        random_sentence=$(shuf -n 1 "$file")
        echo "Predicting random sentence $i from $file: $random_sentence"
        python "$PYTHON_SCRIPT" --mode detect --approach neural_network --input "$random_sentence" --profiles "$MODEL_DIR"
    done

    echo ""
done

echo "All processing completed."
exp.sh ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash

# Demo profile building and detecting with all approaches
# Written by Ye Kyaw Thu, LU Lab., Myanmar
# Last updated: 31 Jan 2024

# Fix: -p so a rerun does not complain when ./log already exists.
mkdir -p log

time ./build_profile_with_char_syl_freq.sh | tee ./log/char_syl_freq.train.log
time ./detect_with_char_syl_freq.sh | tee ./log/char_syl_freq.detect.log

# Fix: removed the duplicated "| tee ./log/" stage, which made tee try
# to write to the log directory itself and emit "Is a directory" errors.
time ./build_profile_with_char_syl_ngram.sh | tee ./log/char_syl_ngram.train.log
time ./detect_with_char_syl_ngram.sh | tee ./log/char_syl_ngram.detect.log

time ./build_profile_with_embeddings.sh | tee ./log/embeddings.train.log
time ./detect_with_embedding.sh | tee ./log/embeddings.detect.log

time ./train_with_fasttext_classifier.sh | tee ./log/fasttext_class.train.log
time ./detect_with_fasttext.sh | tee ./log/fasttext_class.detect.log

# NOTE(review): "nerual" is a typo baked into the helper script's filename.
time ./train_with_nerual.sh | tee ./log/neural.train.log
time ./detect_with_neural.sh | tee ./log/neural.detect.log
train_with_fasttext_classifier.sh ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#
# Train FastText classification models for language detection — one
# model per word-ngram length from 3 to 7, stored as
# profile/fasttext_class/<n>gram.model.bin.

# Locations of the driver script, the training data, and the model folder.
BASE_DIR="$HOME/exp/myNLP/lang_detect"
PYTHON_SCRIPT="$BASE_DIR/demo_usage.py"
INPUT_DATA="$BASE_DIR/data/all_languages.fasttext"
FASTTEXT_DIR="$BASE_DIR/profile/fasttext_class"

# Make sure the model folder exists before training.
mkdir -p "$FASTTEXT_DIR"

# Build one classifier per n-gram setting (3 through 7).
for ((ngram = 3; ngram <= 7; ngram++)); do
    model_output="${FASTTEXT_DIR}/${ngram}gram.model.bin"

    # Train this n-gram variant (timed).
    time python3 "$PYTHON_SCRIPT" --mode train --input "$INPUT_DATA" \
        --output "$model_output" --epoch 25 --lr 1.0 --wordNgrams "$ngram" \
        --approach fasttext_class

    echo "FastText model with ${ngram}-gram saved to $model_output"
done

echo "Training completed for all n-grams."
train_with_nerual.sh ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
#!/bin/bash
#
# Train the neural-network language-detection model: feed the combined
# training corpus to demo_usage.py and store the resulting model plus
# artifacts under ./profile/neural/.
# NOTE(review): "nerual" in this script's filename is a typo for
# "neural", kept because exp.sh and README.txt refer to it by that name.

TRAIN_DATA="./data/all_languages_neural.txt"
MODEL_OUT="./profile/neural/"

python ./demo_usage.py --mode train --approach neural_network \
    --input "$TRAIN_DATA" --output "$MODEL_OUT"