# Favourez's picture
# Upload 3 files
# b6f2a20 verified
# -*- coding: utf-8 -*-
"""FinalTRi.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1hHv74seqk9eYq4JBX2saCvQZqt6xu8-P
"""
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, LSTM, Dense, Embedding, AdditiveAttention, Concatenate
import pandas as pd
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Load your dataset with a specific encoding
def load_data(file_path):
    """Load French/Ewondo sentence pairs from a CSV file.

    Parameters
    ----------
    file_path : str
        Path to a CSV file containing 'French' and 'Ewondo' columns.

    Returns
    -------
    tuple[list, list]
        (french_sentences, ewondo_sentences); both empty on any load failure,
        so callers can guard with a simple truthiness check.
    """
    try:
        # ISO-8859-1 tolerates the accented characters in this corpus;
        # try 'latin1' or 'utf-16' if the file was exported differently.
        df = pd.read_csv(file_path, encoding='ISO-8859-1')
    except UnicodeDecodeError:
        print("Error reading the file. Please check the encoding.")
        return [], []
    except FileNotFoundError:
        # Fix: a missing file previously crashed the script despite the
        # function's documented "empty lists on failure" contract.
        print(f"File not found: {file_path}")
        return [], []
    if 'French' not in df.columns or 'Ewondo' not in df.columns:
        print("Expected 'French' and 'Ewondo' columns in the CSV.")
        return [], []
    # Drop incomplete rows: NaN entries would break Tokenizer.fit_on_texts.
    df = df.dropna(subset=['French', 'Ewondo'])
    return df['French'].tolist(), df['Ewondo'].tolist()
def preprocess_data(french_sentences, ewondo_sentences):
    """Tokenize both sides of the corpus and pad them to fixed lengths.

    Returns
    -------
    tuple
        (fr_sequences, ew_sequences, vocab_size_fr, vocab_size_ew,
        max_length_fr, max_length_ew) — padded index matrices plus the
        vocabulary sizes and maximum sequence lengths for each language.
    """
    def _index_and_pad(sentences):
        # Fit a word-level tokenizer and convert sentences to index sequences.
        tok = Tokenizer()
        tok.fit_on_texts(sentences)
        seqs = tok.texts_to_sequences(sentences)
        longest = max(len(s) for s in seqs)
        padded = pad_sequences(seqs, maxlen=longest, padding='post')
        # +1 because Keras word indices start at 1; 0 is reserved for padding.
        return padded, len(tok.word_index) + 1, longest

    fr_padded, fr_vocab, fr_len = _index_and_pad(french_sentences)
    ew_padded, ew_vocab, ew_len = _index_and_pad(ewondo_sentences)
    return fr_padded, ew_padded, fr_vocab, ew_vocab, fr_len, ew_len
# Load and preprocess the parallel corpus from disk.
french_sentences, ewondo_sentences = load_data('french_ewondo_dictionary.csv')
if french_sentences and ewondo_sentences:
    # NOTE(review): if loading fails, the names below stay undefined and the
    # training code further down raises NameError — confirm this is intended.
    fr_sequences, ew_sequences, vocab_size_fr, vocab_size_ew, max_length_fr, max_length_ew = preprocess_data(french_sentences, ewondo_sentences)
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, LSTM, Embedding, Dense
def create_model(vocab_size_fr, vocab_size_ew, max_length_fr, max_length_ew):
    """Build a seq2seq LSTM encoder-decoder for French -> Ewondo translation.

    Inputs are a padded French index sequence and a (teacher-forced) padded
    Ewondo index sequence; the output is a per-timestep softmax over the
    Ewondo vocabulary.
    """
    # --- Encoder: embed the French sequence and keep the final LSTM state.
    src_in = Input(shape=(max_length_fr,))
    src_emb = Embedding(vocab_size_fr, 256)(src_in)
    _, enc_h, enc_c = LSTM(256, return_sequences=True, return_state=True)(src_emb)

    # --- Decoder: consumes the shifted target sequence, initialised with the
    # encoder's final state so it conditions on the source sentence.
    tgt_in = Input(shape=(max_length_ew,))
    tgt_emb = Embedding(vocab_size_ew, 256)(tgt_in)
    dec_seq, _, _ = LSTM(256, return_sequences=True, return_state=True)(
        tgt_emb, initial_state=[enc_h, enc_c])

    # Per-timestep probability distribution over the Ewondo vocabulary.
    probs = Dense(vocab_size_ew, activation='softmax')(dec_seq)
    return Model([src_in, tgt_in], probs)
# Build and compile the model; sparse targets avoid one-hot memory blow-up.
model = create_model(vocab_size_fr, vocab_size_ew, max_length_fr, max_length_ew)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

import numpy as np

# Teacher forcing: the decoder input is the target sequence shifted right by
# one position, with a start token in slot 0.
decoder_input_data = np.zeros_like(ew_sequences)
decoder_input_data[:, 1:] = ew_sequences[:, :-1]  # shifted target sequence
decoder_input_data[:, 0] = 1  # assumes '1' is the start token — TODO confirm

# BUG FIX: the original split only decoder_input_data and used it as BOTH the
# decoder input and the prediction target, training the model to copy its own
# input. The target must be the unshifted Ewondo sequence, split with the same
# indices as the encoder input and the decoder input.
(X_train_fr, X_test_fr,
 dec_in_train, dec_in_test,
 y_train, y_test) = train_test_split(fr_sequences, decoder_input_data, ew_sequences,
                                     test_size=0.2)

# Fit the model; sparse_categorical_crossentropy expects a trailing singleton
# class axis on the integer targets.
model.fit([X_train_fr, dec_in_train], np.expand_dims(y_train, -1),
          batch_size=64,
          epochs=30,
          validation_split=0.2)

loss, accuracy = model.evaluate([X_test_fr, dec_in_test], np.expand_dims(y_test, -1))
print(f'Test Loss: {loss}, Test Accuracy: {accuracy}')

# Persist the trained model (HDF5 format).
model.save('french_ewondo_translation_model.h5')  # You can choose a different name
def preprocess_input_sentence(sentence, word_to_index, max_length):
    """Map a raw sentence to a padded index sequence of length `max_length`.

    Words absent from `word_to_index` map to index 0 (the padding/unknown slot).
    Returns a (1, max_length) array suitable for a single model prediction.
    """
    indices = [word_to_index.get(token, 0) for token in sentence.split()]
    return pad_sequences([indices], maxlen=max_length, padding='post')
def predict_translation(sentence, model, word_to_index_fr, index_to_word_ew, max_length_ew,
                        encoder_max_length=None):
    """Greedy-decode an Ewondo translation for a French sentence.

    Parameters
    ----------
    sentence : str
        Raw French input sentence.
    model : keras.Model
        Trained seq2seq model taking [encoder_seq, decoder_seq].
    word_to_index_fr : dict
        French word -> index lookup used to encode the input.
    index_to_word_ew : dict
        Ewondo index -> word lookup used to render the output.
    max_length_ew : int
        Maximum decoder sequence length.
    encoder_max_length : int, optional
        Encoder padding length. Defaults to the module-level `max_length_fr`
        (fix: the original read that global silently without declaring it).

    Returns
    -------
    str
        The decoded Ewondo sentence, control tokens excluded.
    """
    if encoder_max_length is None:
        encoder_max_length = max_length_fr  # module-level global, as before
    input_sequence = preprocess_input_sentence(sentence, word_to_index_fr, encoder_max_length)

    start_token = 1  # assumes '1' is the start token — TODO confirm
    end_token = 2    # assumes '2' is the end token — TODO confirm
    # Fix: int dtype so predicted indices stay exact integers (was float zeros).
    decoder_input = np.zeros((1, max_length_ew), dtype='int32')
    decoder_input[0, 0] = start_token

    # Greedy decoding: feed each predicted token back as the next decoder input.
    for i in range(1, max_length_ew):
        output_tokens = model.predict([input_sequence, decoder_input])
        sampled_token_index = int(np.argmax(output_tokens[0, i - 1, :]))
        decoder_input[0, i] = sampled_token_index
        if sampled_token_index == end_token:
            break

    # Fix: skip padding (0) and the start/end control tokens so they never
    # leak into the rendered translation. NOTE(review): this presumes ids 1
    # and 2 are reserved for control tokens in the Ewondo vocabulary — verify.
    control = {0, start_token, end_token}
    words = [index_to_word_ew.get(int(idx), '')
             for idx in decoder_input.flatten()
             if int(idx) not in control]
    return ' '.join(w for w in words if w)
import pandas as pd
from keras.models import load_model

# Reload the trained seq2seq model from disk (path saved by the training step).
model = load_model('french_ewondo_translation_model.h5')  # change to your model's path

# Load the French-Ewondo dictionary with the specified encoding and build a
# word -> word lookup table for the dictionary-based translator below.
dictionary = pd.read_csv('french_ewondo_dictionary.csv', encoding='ISO-8859-1')
french_to_ewondo = dict(zip(dictionary['French'], dictionary['Ewondo']))
def predict_translation(sentence):
    """Translate word-by-word via the French->Ewondo dictionary lookup.

    Note: this redefinition shadows the seq2seq `predict_translation` above.
    Words without a dictionary entry are passed through unchanged.
    """
    translated = []
    for token in sentence.split():
        # Strip surrounding punctuation for the lookup; fall back to the
        # original (unstripped) token when no entry exists.
        key = token.strip(",.!?;:\"'()[]")
        translated.append(french_to_ewondo.get(key, token))
    return ' '.join(translated)
# Example usage of the dictionary-based translator.
french_sentence = "je suis Noa"  # Replace with your input sentence
ewondo_translation = predict_translation(french_sentence)
print("Ewondo Translation:", ewondo_translation)
# Print the architecture of the reloaded seq2seq model.
model.summary()