Update app.py
Browse files
app.py
CHANGED
|
@@ -2,8 +2,10 @@ import os
|
|
| 2 |
import gradio as gr
|
| 3 |
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
|
| 4 |
from huggingface_hub import HfApi
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
-
# Fetch and verify the Hugging Face token
|
| 7 |
huggingface_token = os.getenv('NJOGERERA_TOKEN')
|
| 8 |
if not huggingface_token:
|
| 9 |
raise ValueError("Hugging Face token is not set in the environment variables.")
|
|
@@ -15,21 +17,55 @@ try:
|
|
| 15 |
except Exception as e:
|
| 16 |
raise ValueError("Failed to authenticate with the provided Hugging Face token.")
|
| 17 |
|
| 18 |
-
# Load tokenizer and model manually with token
|
| 19 |
model_path = "vertigo23/njogerera_translation_model_V003"
|
| 20 |
tokenizer = AutoTokenizer.from_pretrained(model_path, use_auth_token=huggingface_token)
|
| 21 |
model = AutoModelForSeq2SeqLM.from_pretrained(model_path, use_auth_token=huggingface_token)
|
| 22 |
|
| 23 |
-
# Create the pipeline
|
| 24 |
translator = pipeline("translation", model=model, tokenizer=tokenizer)
|
| 25 |
|
| 26 |
prefix = "translate Luganda to English: "
|
| 27 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 28 |
def translate_lg_to_en(text):
|
| 29 |
lg_input = prefix + text
|
| 30 |
translated_text = translator(lg_input)
|
| 31 |
english_translation = translated_text[0]['translation_text']
|
| 32 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
return english_translation, ksl_path
|
| 34 |
|
| 35 |
# Gradio interface
|
|
@@ -37,12 +73,13 @@ gr.Interface(
|
|
| 37 |
fn=translate_lg_to_en,
|
| 38 |
inputs=gr.Text(),
|
| 39 |
outputs=[gr.Textbox(label="English Translation"), gr.Video(label="KSL Sign Language Animation")],
|
| 40 |
-
# outputs=gr.Textbox(label="English Translation"),
|
| 41 |
title="Njogerera Translation App",
|
| 42 |
description="Type in a Luganda sentence and see the translation.",
|
| 43 |
article="Above is some sample text to test the results of the model. Click to see the results.",
|
| 44 |
examples=[
|
| 45 |
["Ebikolwa ebitali bya buntu tebikkirizibwa mu kitundu."],
|
|
|
|
|
|
|
| 46 |
],
|
| 47 |
allow_flagging="never"
|
| 48 |
).launch()
|
|
|
|
| 2 |
import gradio as gr
|
| 3 |
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
|
| 4 |
from huggingface_hub import HfApi
|
| 5 |
+
import string
|
| 6 |
+
import os
|
| 7 |
+
from moviepy.editor import VideoFileClip, concatenate_videoclips, ImageClip
|
| 8 |
|
|
|
|
| 9 |
# Fetch the Hugging Face access token from the environment; the app cannot
# download the model without it, so fail fast at startup.
huggingface_token = os.environ.get('NJOGERERA_TOKEN')
if not huggingface_token:
    raise ValueError("Hugging Face token is not set in the environment variables.")
|
|
|
|
| 17 |
except Exception as e:
|
| 18 |
raise ValueError("Failed to authenticate with the provided Hugging Face token.")
|
| 19 |
|
|
|
|
| 20 |
# Load the tokenizer and seq2seq model for the private translation checkpoint.
# `token=` replaces the deprecated `use_auth_token=` keyword: the old name
# still works on current transformers releases but emits a FutureWarning and
# is slated for removal.
model_path = "vertigo23/njogerera_translation_model_V003"
tokenizer = AutoTokenizer.from_pretrained(model_path, token=huggingface_token)
model = AutoModelForSeq2SeqLM.from_pretrained(model_path, token=huggingface_token)
|
| 23 |
|
|
|
|
| 24 |
# Translation pipeline built from the locally loaded model and tokenizer.
translator = pipeline("translation", model=model, tokenizer=tokenizer)

# Task prefix prepended to every input sentence before it is passed to the
# pipeline (see translate_lg_to_en).
prefix = "translate Luganda to English: "

# "Break" image inserted before and after each finger-spelled word
# (see map_word_to_media); path is relative to the app's working directory.
filler_image_path = "alphabet/break.png"
|
| 29 |
+
|
| 30 |
+
def clean_and_split(text):
    """Lowercase *text*, strip all ASCII punctuation, and return its words.

    Splitting is on any whitespace, so an empty or punctuation-only input
    yields an empty list.
    """
    # str.translate removes every punctuation character in one C-level pass.
    strip_punctuation = str.maketrans('', '', string.punctuation)
    normalized = text.lower().translate(strip_punctuation)
    return normalized.split()
|
| 33 |
+
|
| 34 |
+
def map_word_to_media(word):
    """Return the ordered media paths that sign *word*.

    If a dedicated sign clip ``KSL/<word>.mp4`` exists, it is used on its
    own; otherwise the word is finger-spelled with per-letter images from
    ``alphabet/``, bracketed by the break/filler image on both sides.
    Letters with no image file are silently skipped.
    """
    video_path = f"KSL/{word}.mp4"
    if os.path.exists(video_path):
        return [video_path]

    # No whole-word clip: fall back to letter-by-letter spelling.
    letter_images = []
    for letter in word:
        image_path = f"alphabet/{letter}.png"
        if os.path.exists(image_path):
            letter_images.append(image_path)
    return [filler_image_path, *letter_images, filler_image_path]
|
| 42 |
+
|
| 43 |
+
def stitch_media(media_paths):
    """Concatenate video and image files into a single MP4.

    Parameters
    ----------
    media_paths : list of str
        Ordered paths; ``.mp4`` entries become video clips, ``.png`` entries
        become fixed-duration stills, anything else is skipped.

    Returns
    -------
    str
        Path of the rendered video (``KSL/final_translation.mp4``).

    Raises
    ------
    ValueError
        If no usable media files were supplied.
    """
    IMAGE_DURATION = 0.7  # seconds each still image is shown

    clips = []
    try:
        for path in media_paths:
            if path.endswith('.mp4'):
                clips.append(VideoFileClip(path))
            elif path.endswith('.png'):
                clips.append(ImageClip(path).set_duration(IMAGE_DURATION))
        if not clips:
            raise ValueError("No media files to stitch.")

        # "compose" handles clips of differing sizes by padding to the
        # largest frame instead of failing.
        final_clip = concatenate_videoclips(clips, method="compose")
        final_clip.fps = 24
        final_clip_path = "KSL/final_translation.mp4"
        final_clip.write_videofile(final_clip_path, codec="libx264", fps=24)
        final_clip.close()
        return final_clip_path
    finally:
        # VideoFileClip keeps an ffmpeg reader / file handle open; close every
        # source clip so repeated requests in the long-running Gradio app do
        # not leak handles or subprocesses.
        for clip in clips:
            clip.close()
|
| 59 |
+
|
| 60 |
def translate_lg_to_en(text):
    """Translate a Luganda sentence to English and render its KSL video.

    Returns a ``(english_text, video_path)`` tuple matching the two Gradio
    output components (textbox and video).
    """
    result = translator(prefix + text)
    english_translation = result[0]['translation_text']

    # Expand every word of the translation into its sign-language media
    # sequence, then stitch the whole sequence into one video file.
    media_paths = [
        path
        for word in clean_and_split(english_translation)
        for path in map_word_to_media(word)
    ]
    ksl_path = stitch_media(media_paths)
    return english_translation, ksl_path
|
| 70 |
|
| 71 |
# Gradio interface
|
|
|
|
| 73 |
fn=translate_lg_to_en,
|
| 74 |
inputs=gr.Text(),
|
| 75 |
outputs=[gr.Textbox(label="English Translation"), gr.Video(label="KSL Sign Language Animation")],
|
|
|
|
| 76 |
title="Njogerera Translation App",
|
| 77 |
description="Type in a Luganda sentence and see the translation.",
|
| 78 |
article="Above is some sample text to test the results of the model. Click to see the results.",
|
| 79 |
examples=[
|
| 80 |
["Ebikolwa ebitali bya buntu tebikkirizibwa mu kitundu."],
|
| 81 |
+
["Olugudo olugenda e Masaka lugadwawo."],
|
| 82 |
+
["Abalwadde ba Malaria mu dwaliro lye Nsambya bafunye obujanjabi."],
|
| 83 |
],
|
| 84 |
allow_flagging="never"
|
| 85 |
).launch()
|