# myZagar / demo_usage.py
# (HuggingFace page residue preserved as comments: author ye-nlp,
#  commit "added shell scripts etc.", hash 0d84e21)
"""
Demo Usage of myZagar language detection library.
Written by Ye Kyaw Thu, LU Lab., Myanmar
Last updated: 31 Jan 2024
Usage: python ./demo_usage.py --help
"""
import argparse
import os
import sys
from collections import defaultdict, Counter
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from char_syl_freq import create_frequency_profile, detect_language as detect_language_freq, save_profile as save_freq_profile, load_profiles as load_freq_profiles, break_syllables
from char_syl_ngram import train_naive_bayes, detect_language_naive_bayes, sylbreak
from embedding import train_embeddings, save_model as save_embed_model, load_model as load_embed_model, detect_language as detect_language_embed
from fasttext_class import train_model as fasttext_class_train_model, test_model as fasttext_class_test_model, predict_language
from neural import load_data as load_neural_data, preprocess_data as neural_preprocess_data, build_model as build_neural_model, train_model as train_neural_model, save_model_and_artifacts, load_model_and_artifacts, detect_language as detect_language_neural
def _read_input(input_arg):
    """Return the contents of *input_arg* if it is a file path, else the string itself."""
    if os.path.isfile(input_arg):
        with open(input_arg, 'r', encoding='utf-8') as file:
            return file.read()
    return input_arg


def _handle_char_syl_freq(args, raw_text):
    """Train or apply the character/syllable frequency-profile approach."""
    if args.mode == 'train':
        if not args.input or not args.output:
            print("For training, both --input, --approach and --output arguments are required.")
        else:
            frequency_profile = create_frequency_profile(raw_text)
            save_freq_profile(frequency_profile, args.output)
            print(f"Frequency profile saved to {args.output}")
    elif args.mode == 'detect':
        if not args.input or not args.profiles:
            print("For detection, both --input and --profiles arguments are required.")
        else:
            profiles = load_freq_profiles(args.profiles)
            detected_language = detect_language_freq(raw_text, profiles)
            print(f"Detected language: {detected_language}")


def _handle_char_syl_ngram(args, raw_text):
    """Train or apply the naive-Bayes character/syllable n-gram approach."""
    if args.mode == 'train':
        if not args.input or not args.output:
            print("For training, both --input and --output arguments are required.")
        else:
            # Build both character- and syllable-level profiles and merge them.
            char_profile = train_naive_bayes(raw_text, args.ngram, use_syllables=False)
            syl_profile = train_naive_bayes(raw_text, args.ngram, use_syllables=True)
            combined_profile = {**char_profile, **syl_profile}
            # args.output is guaranteed truthy here (checked above), so the
            # original's stdout fallback branch was dead code and is removed.
            with open(args.output, 'w', encoding='utf-8') as file:
                for ngram, prob in combined_profile.items():
                    file.write(f"{ngram}\t{prob}\n")
    elif args.mode == 'detect':
        if not args.profiles:
            # BUG FIX: message previously named non-existent flags
            # "-p or --profile_folder"; the parser defines --profiles.
            print("Please provide a profiles folder for detection using --profiles!")
            sys.exit(1)
        # Character-based detection
        char_probabilities = detect_language_naive_bayes(
            raw_text, args.profiles, args.ngram, use_syllables=False, verbose=args.verbose)
        print("Character-based Detection:")
        for lang, prob in char_probabilities.items():
            print(f"{lang}: {prob*100:.2f}%")
        # Syllable-based detection
        syl_probabilities = detect_language_naive_bayes(
            raw_text, args.profiles, args.ngram, use_syllables=True, verbose=args.verbose)
        print("\nSyllable-based Detection:")
        for lang, prob in syl_probabilities.items():
            print(f"{lang}: {prob*100:.2f}%")
        # Combined detection: simple average of the two per-language scores.
        print("\nCombined Character and Syllable-based Detection:")
        combined_scores = defaultdict(float)
        for lang in char_probabilities:
            combined_scores[lang] = (char_probabilities[lang] + syl_probabilities[lang]) / 2
        for lang, score in combined_scores.items():
            print(f"{lang}: {score*100:.2f}%")


def _handle_embedding(args, raw_text, method):
    """Train or apply an embedding model; *method* is 'word2vec' or 'fasttext'.

    The two embedding approaches shared near-identical copy-paste branches in
    the original; they are unified here.
    """
    if args.mode == 'train':
        if not args.input or not args.output:
            print("For training, both --input, --approach and --output arguments are required.")
        else:
            model = train_embeddings(args.input, method, args.size, args.window, args.min_count)
            save_embed_model(model, args.output)
            print(f"Model saved to {args.output}")
    elif args.mode == 'detect':
        if not args.input or not args.profiles:
            print("For detection, both --input, --approach and --profiles arguments are required.")
        else:
            # BUG FIX: original read args.profile / args.model_folder, neither of
            # which is defined by the parser (AttributeError); use args.profiles.
            models = {fname.split('.')[0]: load_embed_model(os.path.join(args.profiles, fname))
                      for fname in os.listdir(args.profiles) if fname.endswith('.model')}
            # BUG FIX: original called undefined `detect_language`; the embedding
            # detector is imported as detect_language_embed.
            detected_language = detect_language_embed(raw_text, models)
            print(f"Detected language: {detected_language}")


def _handle_fasttext_class(args):
    """Train, test, or predict with the FastText classification model."""
    if args.mode == 'train':
        if not args.input or not args.output:
            print("For training, both --input, --approach and --output arguments are required.")
        else:
            fasttext_class_train_model(args.input, args.output, args.epoch, args.lr, args.wordNgrams)
    elif args.mode == 'detect':
        if not args.profiles or not args.input:
            print("For testing, both --profiles and --input arguments are required.")
        else:
            fasttext_class_test_model(args.profiles, args.input)
    elif args.mode == 'predict':
        if not args.profiles or not args.input:
            print("For prediction, both --profiles and --input arguments are required.")
        elif os.path.isfile(args.input):
            # File input: predict_language handles per-line output itself.
            predict_language(args.profiles, args.input, is_file=True)
        else:
            prediction = predict_language(args.profiles, args.input)
            print(f"Predicted language: {prediction}")


def _handle_neural_network(args):
    """Train or apply the neural-network approach."""
    if args.mode == 'train':
        if not args.input or not args.output:
            print("For training, both --input, --approach and --output arguments are required.")
            sys.exit(1)  # was builtin exit(); sys.exit is the idiomatic script form
        texts, labels = load_neural_data(args.input)
        X, y, tokenizer, label_encoder = neural_preprocess_data(
            texts, labels, args.num_words, args.max_len)
        X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
        model = build_neural_model(args.max_len, len(label_encoder.classes_), args.num_words)
        train_neural_model(model, X_train, y_train, X_val, y_val, args.epoch, args.batch_size)
        save_model_and_artifacts(model, tokenizer, label_encoder, args.output)
        print(f"Model and artifacts saved to {args.output}")
    elif args.mode == 'detect':
        if not args.input or not args.profiles:
            print("For detection, both --input, --approach and --profiles arguments are required.")
            sys.exit(1)
        model, tokenizer, label_encoder = load_model_and_artifacts(args.profiles)
        text = _read_input(args.input).strip()
        detected_language = detect_language_neural(
            text, model, tokenizer, label_encoder, args.max_len)
        print(f"Detected language: {detected_language}")


def main():
    """Command-line entry point for the myZagar language-detection demo.

    Parses the CLI arguments, reads the input (a file path or a literal
    string), and dispatches to the handler for the selected approach/mode.
    """
    parser = argparse.ArgumentParser(description='Language detection based on character, n-gram frequency analysis, embeddings.')
    # Approach selects which detection backend is used.
    parser.add_argument('--approach', choices=['char_syl_freq', 'char_syl_ngram', 'word2vec_embedding', 'fasttext_embedding', 'fasttext_class', 'neural_network'], required=True, help='Select the approach: char_syl_freq or char_syl_ngram or word2vec_embedding or fasttext_embedding')
    parser.add_argument('--mode', choices=['train', 'detect', 'predict'], required=True, help='Mode of operation: build or detect or predict, predict is only for FastText Classification Model')
    parser.add_argument('--input', type=str, required=True, help='Input file path.')
    parser.add_argument('--output', type=str, help='Output file path (only for profile creation).', default=None)
    parser.add_argument('--profiles', type=str, help='Folder path containing saved frequency profiles (only for detection).', default=None)
    parser.add_argument('--ngram', type=int, help='Maximum n-gram value (default: 3).', default=3)
    parser.add_argument('--size', type=int, default=100, help='Dimension of the embeddings (default: 100)')
    parser.add_argument('--window', type=int, default=5, help='Maximum distance between the current and predicted word (default: 5), for embeddings')
    parser.add_argument('--min_count', type=int, default=2, help='Ignores all words with total frequency lower than this (default: 2), for embeddings')
    parser.add_argument('--epoch', type=int, default=25, help='Number of epochs for training (default: 25), for FastText Classification and Neural Network approaches')
    parser.add_argument('--lr', type=float, default=1.0, help='Learning rate for training (default: 1.0), for FastText Classification and Neural Network approaches')
    parser.add_argument('--wordNgrams', type=int, default=2, help='Max length of word ngram (default: 2)')
    parser.add_argument('--num_words', type=int, default=10000, help='Number of words to consider from the dataset (default: 10000)')
    parser.add_argument('--max_len', type=int, default=100, help='Maximum length of the sequences (default: 100)')
    parser.add_argument('--batch_size', type=int, default=32, help='Batch size for training (default: 32)')
    parser.add_argument('--verbose', action='store_true', help='Display warning messages., for FastText Classification')
    args = parser.parse_args()

    # --input may be either a path to a UTF-8 text file or a literal string.
    raw_text = _read_input(args.input)

    if args.approach == 'char_syl_freq':
        _handle_char_syl_freq(args, raw_text)
    elif args.approach == 'char_syl_ngram':
        _handle_char_syl_ngram(args, raw_text)
    elif args.approach in ('word2vec_embedding', 'fasttext_embedding'):
        method = 'word2vec' if args.approach == 'word2vec_embedding' else 'fasttext'
        _handle_embedding(args, raw_text, method)
    elif args.approach == 'fasttext_class':
        _handle_fasttext_class(args)
    elif args.approach == 'neural_network':
        _handle_neural_network(args)
    else:
        # Unreachable in practice: argparse `choices` rejects unknown values.
        # BUG FIX: original message was missing the closing quote on 'neural_network'.
        print("Invalid approach. Please choose either 'char_syl_freq' or 'char_syl_ngram' or 'word2vec_embedding' or 'fasttext_embedding' or 'fasttext_class' or 'neural_network'.")
# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()