|
|
import os |
|
|
import io |
|
|
import requests |
|
|
import gradio as gr |
|
|
from groq import Groq |
|
|
from transformers import MarianMTModel, MarianTokenizer, AutoModelForCausalLM, AutoTokenizer |
|
|
from deep_translator import GoogleTranslator |
|
|
from PIL import Image, ImageDraw |
|
|
import joblib |
|
|
import time |
|
|
import torch |
|
|
import warnings |
|
|
from huggingface_hub import InferenceApi |
|
|
from diffusers import StableDiffusionPipeline |
|
|
|
|
|
# --- Model / client setup -------------------------------------------------

# Run on GPU when available; every model below is moved to this device.
device = "cuda" if torch.cuda.is_available() else "cpu"

# GPT-2 produces the short "creative text" continuation of the translation.
text_generation_model = AutoModelForCausalLM.from_pretrained("gpt2").to(device)
text_generation_tokenizer = AutoTokenizer.from_pretrained("gpt2")

# GPT-2 ships without a pad token; reuse EOS so padded tokenization works.
text_generation_tokenizer.pad_token = text_generation_tokenizer.eos_token

# Groq client used for Whisper transcription in process_audio().
# BUG FIX: `client` was referenced there but never created, which raised
# NameError on the first request. The Groq SDK reads GROQ_API_KEY from the
# environment when no key is passed explicitly.
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

# Stable Diffusion pipeline for the optional image-generation step.
# BUG FIX: `pipe` was also referenced in process_audio() but never created.
# NOTE(review): checkpoint id assumed — confirm which SD model was intended.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)
|
|
|
|
|
|
|
|
def process_audio(audio_path, image_option):
    """Transcribe a Tamil audio file, translate it, and enrich the result.

    Pipeline: Groq Whisper transcription (Tamil) -> Google translation to
    English -> GPT-2 creative continuation -> optional Stable Diffusion
    image from the translation.

    Args:
        audio_path: Filesystem path to the uploaded audio file, or None.
        image_option: "Generate Image" to run Stable Diffusion on the
            translation; any other value skips image generation.

    Returns:
        Tuple ``(tamil_text, translation, image, creative_text)`` matching
        the four Gradio output slots. On failure, the earliest applicable
        slot carries an error-message string and later slots are None.
    """
    if audio_path is None:
        return "Please upload an audio file.", None, None, None

    # 1) Speech-to-text via Groq's hosted Whisper model.
    try:
        with open(audio_path, "rb") as file:
            transcription = client.audio.transcriptions.create(
                file=(os.path.basename(audio_path), file.read()),
                model="whisper-large-v3",
                language="ta",
                response_format="verbose_json",
            )
        tamil_text = transcription.text
    except Exception as e:
        return f"An error occurred during transcription: {str(e)}", None, None, None

    # 2) Tamil -> English translation.
    try:
        translator = GoogleTranslator(source='ta', target='en')
        translation = translator.translate(tamil_text)
    except Exception as e:
        return tamil_text, f"An error occurred during translation: {str(e)}", None, None

    # 3) Short GPT-2 continuation of the translated text.
    creative_text = _generate_creative_text(translation)

    # 4) Optional image generation from the English translation.
    image = None
    if image_option == "Generate Image":
        try:
            image = pipe(translation).images[0]
        except Exception as e:
            # BUG FIX: the error used to be returned in the image slot, but
            # that slot is a gr.Image and cannot render a string. Report it
            # in the creative-text textbox instead and leave the image None.
            return tamil_text, translation, None, f"An error occurred during image generation: {str(e)}"

    return tamil_text, translation, image, creative_text


def _generate_creative_text(english_text):
    """Generate a short creative continuation of ``english_text`` with GPT-2.

    Hoisted out of process_audio() so the function object is built once,
    not on every request. Errors are returned as strings (not raised) so
    the Gradio UI can display them in the output textbox.

    Args:
        english_text: Prompt text; falsy input yields a help message.

    Returns:
        The generated continuation, or an error/help message string.
    """
    if not english_text:
        return "Please provide text to generate creative content."

    try:
        inputs = text_generation_tokenizer(
            english_text, return_tensors="pt", padding=True, truncation=True
        ).to(device)
        generated_tokens = text_generation_model.generate(
            **inputs,
            # BUG FIX: max_length counts the prompt tokens too, so a long
            # transcription left no budget for new text; max_new_tokens
            # always allows 60 freshly generated tokens.
            max_new_tokens=60,
            num_return_sequences=1,
            no_repeat_ngram_size=3,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            # NOTE: early_stopping removed — it applies only to beam search
            # and is ignored (with a warning) when sampling.
        )
        return text_generation_tokenizer.decode(
            generated_tokens[0], skip_special_tokens=True
        ).strip()
    except Exception as e:
        return f"An error occurred during text generation: {str(e)}"
|
|
|
|
|
|
|
|
# --- Gradio UI ------------------------------------------------------------
with gr.Blocks() as iface:
    gr.Markdown("# Audio Transcription, Translation, and Image Generation")

    with gr.Row():
        # Left column: the inputs plus the trigger button.
        with gr.Column():
            audio_in = gr.Audio(type="filepath", label="Upload Audio File")
            image_choice = gr.Dropdown(
                ["Generate Image", "Skip Image"],
                label="Image Generation",
                value="Generate Image",
            )
            run_btn = gr.Button("Process Audio")

        # Right column: one widget per element of process_audio()'s result.
        with gr.Column():
            out_tamil = gr.Textbox(label="Tamil Transcription", interactive=False)
            out_english = gr.Textbox(label="English Translation", interactive=False)
            out_image = gr.Image(label="Generated Image")
            out_creative = gr.Textbox(label="Creative Text", interactive=False)

    # Wire the button to the processing pipeline; output order must match
    # the tuple returned by process_audio().
    run_btn.click(
        fn=process_audio,
        inputs=[audio_in, image_choice],
        outputs=[out_tamil, out_english, out_image, out_creative],
    )

iface.launch()