# Source: HuggingFace Space by Nrnaidu — "Update app.py", commit 43a9bb7 (verified)
import os
import io
import requests
import gradio as gr
from groq import Groq
from transformers import MarianMTModel, MarianTokenizer, AutoModelForCausalLM, AutoTokenizer
from deep_translator import GoogleTranslator
from PIL import Image, ImageDraw
import joblib
import time
import torch
import warnings
from huggingface_hub import InferenceApi
from diffusers import StableDiffusionPipeline
# --- GPT-2 text-generation backend ------------------------------------------
# Select the fastest available device once at startup; the model and all
# tokenized inputs are moved there so generate() never mixes devices.
device = "cuda" if torch.cuda.is_available() else "cpu"

# GPT-2 ships without a pad token, so reuse EOS for padding — required because
# inputs are later tokenized with padding=True.
text_generation_tokenizer = AutoTokenizer.from_pretrained("gpt2")
text_generation_tokenizer.pad_token = text_generation_tokenizer.eos_token

# The causal LM that produces the "creative text" continuations.
text_generation_model = AutoModelForCausalLM.from_pretrained("gpt2").to(device)
# Function to transcribe, translate, analyze sentiment, and generate image
def process_audio(audio_path, image_option):
    """Run the full pipeline on an uploaded Tamil audio file.

    Steps: (1) transcribe via Groq Whisper, (2) translate Tamil -> English,
    (3) generate a short GPT-2 "creative text" continuation, (4) optionally
    generate an image from the translation with Stable Diffusion.

    Parameters
    ----------
    audio_path : str | None
        Filesystem path from ``gr.Audio(type="filepath")``; ``None`` if the
        user submitted without uploading.
    image_option : str
        ``"Generate Image"`` runs the diffusion pipeline; any other value
        (e.g. ``"Skip Image"``) leaves the image output as ``None``.

    Returns
    -------
    tuple
        ``(tamil_text, translation, image, creative_text)``. On failure the
        slot for the failed stage carries an error-message string and the
        remaining slots are ``None``.
    """
    if audio_path is None:
        return "Please upload an audio file.", None, None, None

    # Step 1: transcribe audio with Groq's Whisper endpoint.
    # NOTE(review): `client` is never defined in this file — a module-level
    # `client = Groq(api_key=...)` is required or this raises NameError.
    try:
        with open(audio_path, "rb") as file:
            transcription = client.audio.transcriptions.create(
                file=(os.path.basename(audio_path), file.read()),
                model="whisper-large-v3",
                language="ta",
                response_format="verbose_json",
            )
        tamil_text = transcription.text
    except Exception as e:
        return f"An error occurred during transcription: {str(e)}", None, None, None

    # Step 2: translate Tamil -> English.
    try:
        translator = GoogleTranslator(source='ta', target='en')
        translation = translator.translate(tamil_text)
    except Exception as e:
        return tamil_text, f"An error occurred during translation: {str(e)}", None, None

    # Step 3: expand the translation into creative text (errors are returned
    # as a message string by the helper, never raised).
    creative_text = _generate_creative_text(translation)

    # Step 4: optionally generate an image from the English translation.
    # NOTE(review): `pipe` (a StableDiffusionPipeline) is never defined in
    # this file — it must be constructed at module level for this to work.
    image = None
    if image_option == "Generate Image":
        try:
            image = pipe(translation).images[0]
        except Exception as e:
            return tamil_text, translation, f"An error occurred during image generation: {str(e)}", None

    return tamil_text, translation, image, creative_text


def _generate_creative_text(english_text):
    """Generate a GPT-2 continuation of *english_text* (<= 60 total tokens).

    Returns the decoded text, or an explanatory message string if the input
    is empty or generation fails.
    """
    if not english_text:
        return "Please provide text to generate creative content."
    try:
        # Tokenize and keep tensors on the same device as the model.
        inputs = text_generation_tokenizer(
            english_text, return_tensors="pt", padding=True, truncation=True
        ).to(device)
        generated_tokens = text_generation_model.generate(
            **inputs,
            max_length=60,  # total length including the prompt tokens
            num_return_sequences=1,
            no_repeat_ngram_size=3,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            # early_stopping removed: it only applies to beam search and
            # triggers a UserWarning when combined with sampling.
        )
        return text_generation_tokenizer.decode(
            generated_tokens[0], skip_special_tokens=True
        ).strip()
    except Exception as e:
        return f"An error occurred during text generation: {str(e)}"
# Create Gradio interface
# Build the Gradio interface: audio + option inputs on the left, the four
# pipeline outputs on the right.
with gr.Blocks() as iface:
    gr.Markdown("# Audio Transcription, Translation, and Image Generation")
    with gr.Row():
        with gr.Column():
            audio_input = gr.Audio(type="filepath", label="Upload Audio File")
            image_option = gr.Dropdown(
                ["Generate Image", "Skip Image"],
                label="Image Generation",
                value="Generate Image",
            )
            submit_button = gr.Button("Process Audio")
        with gr.Column():
            tamil_text_output = gr.Textbox(label="Tamil Transcription", interactive=False)
            translation_output = gr.Textbox(label="English Translation", interactive=False)
            image_output = gr.Image(label="Generated Image")
            creative_text_output = gr.Textbox(label="Creative Text", interactive=False)

    # Wire the button to the pipeline; output order matches process_audio's
    # 4-tuple return: (tamil, translation, image, creative_text).
    submit_button.click(
        fn=process_audio,
        inputs=[audio_input, image_option],
        outputs=[tamil_text_output, translation_output, image_output, creative_text_output],
    )

# Launch the interface
iface.launch()