|
|
|
|
|
"""FinalTRi.ipynb |
|
|
|
|
|
Automatically generated by Colab. |
|
|
|
|
|
Original file is located at |
|
|
https://colab.research.google.com/drive/1hHv74seqk9eYq4JBX2saCvQZqt6xu8-P |
|
|
""" |
|
|
|
|
|
import pandas as pd |
|
|
import numpy as np |
|
|
import tensorflow as tf |
|
|
from tensorflow.keras.preprocessing.text import Tokenizer |
|
|
from tensorflow.keras.preprocessing.sequence import pad_sequences |
|
|
from tensorflow.keras.models import Model |
|
|
from tensorflow.keras.layers import Input, LSTM, Dense, Embedding, AdditiveAttention, Concatenate |
|
|
|
|
|
import pandas as pd |
|
|
from sklearn.model_selection import train_test_split |
|
|
from tensorflow.keras.preprocessing.text import Tokenizer |
|
|
from tensorflow.keras.preprocessing.sequence import pad_sequences |
|
|
|
|
|
|
|
|
def load_data(file_path):
    """Load parallel French/Ewondo sentence pairs from a CSV file.

    Args:
        file_path: Path to a CSV file containing 'French' and 'Ewondo' columns.

    Returns:
        A (french_sentences, ewondo_sentences) tuple of lists. Both lists are
        empty when the file cannot be read or lacks the expected columns.
    """
    try:
        # The dictionary file is Latin-1 encoded (Ewondo uses accented chars).
        df = pd.read_csv(file_path, encoding='ISO-8859-1')
    except UnicodeDecodeError:
        print("Error reading the file. Please check the encoding.")
        return [], []
    except (FileNotFoundError, pd.errors.ParserError) as exc:
        # Previously these propagated and crashed the script, contradicting
        # the function's "return empty lists on failure" contract.
        print(f"Error reading the file: {exc}")
        return [], []
    if 'French' not in df.columns or 'Ewondo' not in df.columns:
        print("Error: CSV must contain 'French' and 'Ewondo' columns.")
        return [], []
    return df['French'].tolist(), df['Ewondo'].tolist()
|
|
|
|
|
|
|
|
def preprocess_data(french_sentences, ewondo_sentences):
    """Tokenize and pad the parallel French/Ewondo corpora.

    Args:
        french_sentences: list of French source strings.
        ewondo_sentences: list of Ewondo target strings.

    Returns:
        (fr_sequences, ew_sequences, vocab_size_fr, vocab_size_ew,
         max_length_fr, max_length_ew): post-padded integer matrices, the
        vocabulary sizes (+1 for the reserved padding index 0), and the
        per-language maximum sequence lengths.

    NOTE(review): the fitted tokenizers are discarded here, so inference code
    cannot reuse the exact word<->index mappings — consider returning them.
    """
    # One tokenizer per language. Keras Tokenizer lowercases and strips
    # punctuation by default, so inference-time lookups must do the same.
    tokenizer_fr = Tokenizer()
    tokenizer_fr.fit_on_texts(french_sentences)
    vocab_size_fr = len(tokenizer_fr.word_index) + 1  # +1: padding index 0

    tokenizer_ew = Tokenizer()
    tokenizer_ew.fit_on_texts(ewondo_sentences)
    vocab_size_ew = len(tokenizer_ew.word_index) + 1

    fr_sequences = tokenizer_fr.texts_to_sequences(french_sentences)
    ew_sequences = tokenizer_ew.texts_to_sequences(ewondo_sentences)

    # default=1 guards against an empty corpus, where bare max() would raise
    # ValueError.
    max_length_fr = max((len(seq) for seq in fr_sequences), default=1)
    max_length_ew = max((len(seq) for seq in ew_sequences), default=1)

    fr_sequences = pad_sequences(fr_sequences, maxlen=max_length_fr, padding='post')
    ew_sequences = pad_sequences(ew_sequences, maxlen=max_length_ew, padding='post')

    return fr_sequences, ew_sequences, vocab_size_fr, vocab_size_ew, max_length_fr, max_length_ew
|
|
|
|
|
|
|
|
# Load the parallel corpus and, when both sides are non-empty, tokenize and
# pad it; preprocess_data also reports vocabulary sizes and max lengths used
# by the model below.
french_sentences, ewondo_sentences = load_data('french_ewondo_dictionary.csv')
if french_sentences and ewondo_sentences:
    fr_sequences, ew_sequences, vocab_size_fr, vocab_size_ew, max_length_fr, max_length_ew = preprocess_data(french_sentences, ewondo_sentences)
|
|
|
|
|
from tensorflow.keras.models import Model |
|
|
from tensorflow.keras.layers import Input, LSTM, Embedding, Dense |
|
|
|
|
|
def create_model(vocab_size_fr, vocab_size_ew, max_length_fr, max_length_ew,
                 embedding_dim=256, latent_dim=256):
    """Build an encoder-decoder (seq2seq) LSTM model for French -> Ewondo.

    Args:
        vocab_size_fr: French vocabulary size (including padding index 0).
        vocab_size_ew: Ewondo vocabulary size (including padding index 0).
        max_length_fr: fixed length of padded encoder input sequences.
        max_length_ew: fixed length of padded decoder input sequences.
        embedding_dim: size of both embedding layers (default 256, matching
            the previously hard-coded value).
        latent_dim: LSTM hidden-state size (default 256, ditto).

    Returns:
        An uncompiled Keras Model mapping [encoder_input, decoder_input] to
        per-position softmax distributions over the Ewondo vocabulary.
    """
    # Encoder: embed the French sequence and keep only the final LSTM
    # states, which seed the decoder.
    encoder_inputs = Input(shape=(max_length_fr,))
    encoder_embedding = Embedding(vocab_size_fr, embedding_dim)(encoder_inputs)
    encoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
    encoder_outputs, state_h, state_c = encoder_lstm(encoder_embedding)
    encoder_states = [state_h, state_c]

    # Decoder: consumes the teacher-forced Ewondo input, initialized with
    # the encoder's final states.
    decoder_inputs = Input(shape=(max_length_ew,))
    decoder_embedding = Embedding(vocab_size_ew, embedding_dim)(decoder_inputs)
    decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
    decoder_outputs, _, _ = decoder_lstm(decoder_embedding, initial_state=encoder_states)

    # Per-timestep softmax over the target vocabulary.
    decoder_dense = Dense(vocab_size_ew, activation='softmax')
    decoder_outputs = decoder_dense(decoder_outputs)

    model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
    return model
|
|
|
|
|
# Build and compile the model. sparse_categorical_crossentropy lets targets
# stay as integer token ids instead of one-hot vectors.
model = create_model(vocab_size_fr, vocab_size_ew, max_length_fr, max_length_ew)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

import numpy as np

# Teacher forcing: the decoder input is the target sequence shifted right by
# one position, with a start-of-sequence marker in slot 0.
# NOTE(review): token id 1 is also assigned by the Tokenizer to the most
# frequent Ewondo word, so it does double duty as a start token — confirm.
decoder_input_data = np.zeros_like(ew_sequences)
decoder_input_data[:, 1:] = ew_sequences[:, :-1]
decoder_input_data[:, 0] = 1

# Split source, shifted decoder input, AND the unshifted targets in one call
# so the three arrays stay row-aligned.
# BUG FIX: the shifted decoder input was previously reused as the training
# target, which teaches the model to copy its own input (identity mapping).
# The target must be the unshifted sequence — the token at each NEXT step.
(X_train_fr, X_test_fr,
 dec_in_train, dec_in_test,
 y_train, y_test) = train_test_split(fr_sequences, decoder_input_data,
                                     ew_sequences, test_size=0.2)

# sparse_categorical_crossentropy expects a trailing singleton class axis on
# the integer targets, hence expand_dims.
model.fit([X_train_fr, dec_in_train], np.expand_dims(y_train, -1),
          batch_size=64,
          epochs=30,
          validation_split=0.2)

loss, accuracy = model.evaluate([X_test_fr, dec_in_test], np.expand_dims(y_test, -1))
print(f'Test Loss: {loss}, Test Accuracy: {accuracy}')

# Persist the trained model (legacy HDF5 format) for the inference section.
model.save('french_ewondo_translation_model.h5')
|
|
|
|
|
def preprocess_input_sentence(sentence, word_to_index, max_length):
    """Convert a raw sentence into a padded sequence of token indices.

    Args:
        sentence: raw input string.
        word_to_index: word -> integer index mapping (e.g. Tokenizer.word_index).
        max_length: length to pad/truncate the sequence to.

    Returns:
        A (1, max_length) integer array, post-padded with zeros.
    """
    # BUG FIX: Keras Tokenizer lowercases text while fitting (lower=True by
    # default), so the lookup must lowercase too; otherwise capitalized
    # words (e.g. at sentence start) always missed and fell back to 0.
    tokens = sentence.lower().split()
    # NOTE(review): unknown words map to 0, which collides with the padding
    # index — consider reserving an explicit OOV token in the Tokenizer.
    token_indices = [word_to_index.get(word, 0) for word in tokens]
    padded_sequence = pad_sequences([token_indices], maxlen=max_length, padding='post')
    return padded_sequence
|
|
|
|
|
def predict_translation(sentence, model, word_to_index_fr, index_to_word_ew, max_length_ew):
    """Greedily decode an Ewondo translation of a French sentence.

    Args:
        sentence: raw French input string.
        model: trained seq2seq Keras model taking [encoder_in, decoder_in].
        word_to_index_fr: French word -> index mapping for encoding.
        index_to_word_ew: Ewondo index -> word mapping for decoding.
        max_length_ew: maximum decoder sequence length.

    Returns:
        The decoded Ewondo sentence as a space-joined string.

    NOTE(review): this function is shadowed by the dictionary-based
    predict_translation defined later in the file, so the demo code never
    actually calls it.
    """
    # NOTE(review): relies on the module-level global max_length_fr rather
    # than a parameter — confirm it matches the model's training-time length.
    input_sequence = preprocess_input_sentence(sentence, word_to_index_fr, max_length_fr)

    # Seed the decoder with the start-of-sequence marker in slot 0.
    # NOTE(review): id 1 is also a real vocabulary index under a default
    # Tokenizer — verify the start-token convention used at training time.
    start_token = 1
    decoder_input = np.zeros((1, max_length_ew))
    decoder_input[0, 0] = start_token

    # Greedy decoding: re-run the full model each step; output position i-1
    # holds the prediction for token i given the tokens decoded so far.
    for i in range(1, max_length_ew):
        output_tokens = model.predict([input_sequence, decoder_input])
        sampled_token_index = np.argmax(output_tokens[0, i-1, :])
        decoder_input[0, i] = sampled_token_index

        # Token id 2 is treated as end-of-sequence — presumably a convention
        # of the training data; TODO confirm.
        if sampled_token_index == 2:
            break

    # Join every non-padding token. NOTE(review): ids 1 and 2 map to real
    # words in index_to_word_ew, so the start/stop markers can leak into
    # the output string.
    translated_sentence = ' '.join([index_to_word_ew.get(index, '') for index in decoder_input.flatten() if index > 0])

    return translated_sentence
|
|
|
|
|
import pandas as pd |
|
|
from keras.models import load_model |
|
|
|
|
|
|
|
|
# Reload the trained seq2seq model saved by the training section above.
model = load_model('french_ewondo_translation_model.h5')

# Build a word-level French -> Ewondo lookup table from the same Latin-1
# encoded CSV used for training; the dictionary-based translator below
# uses this instead of the neural model.
dictionary = pd.read_csv('french_ewondo_dictionary.csv', encoding='ISO-8859-1')
french_to_ewondo = dict(zip(dictionary['French'], dictionary['Ewondo']))
|
|
|
|
|
def predict_translation(sentence, mapping=None):
    """Translate word-by-word with the French -> Ewondo dictionary table.

    Shadows the seq2seq `predict_translation` defined earlier; this simple
    lookup is what the demo code actually runs.

    Args:
        sentence: French input string.
        mapping: optional French -> Ewondo word dict. Defaults to the
            module-level `french_to_ewondo` table loaded from the CSV
            (backward-compatible generalization for reuse/testing).

    Returns:
        The sentence with each known word replaced by its Ewondo
        equivalent; unknown words pass through unchanged.
    """
    if mapping is None:
        mapping = french_to_ewondo
    words = sentence.split()
    ewondo_words = []
    for word in words:
        # Strip surrounding punctuation for the lookup; words with no match
        # are kept verbatim (punctuation included).
        ewondo_word = mapping.get(word.strip(",.!?;:\"'()[]"), word)
        ewondo_words.append(ewondo_word)
    return ' '.join(ewondo_words)
|
|
|
|
|
|
|
|
# Demo: translate a sample sentence with the dictionary-based
# predict_translation defined just above (not the seq2seq decoder).
french_sentence = "je suis Noa"
ewondo_translation = predict_translation(french_sentence)

print("Ewondo Translation:", ewondo_translation)

# Print the architecture of the loaded seq2seq model (not used by the
# dictionary-based demo above).
model.summary()
|
|
|
|
|
|