import os
from huggingface_hub import login

# Retrieve the Hugging Face token from the environment variable
hf_token = os.getenv("HF_TOKEN")

# Check if the token was retrieved properly
if hf_token:
    # Use the retrieved token
    login(token=hf_token, add_to_git_credential=True)
else:
    raise ValueError("Hugging Face token not found in environment variables.")
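# Note: in a Hugging Face Space, HF_TOKEN is typically set as a repository
# secret (Settings -> Variables and secrets) rather than hard-coded.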
# Import the libraries used below
from transformers import MarianMTModel, MarianTokenizer, pipeline
import requests
import io
from PIL import Image
import gradio as gr

# Load the translation model and tokenizer
model_name = "Helsinki-NLP/opus-mt-mul-en"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

# Create a translation pipeline
translator = pipeline("translation", model=model, tokenizer=tokenizer)
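# Optional (a sketch, assuming the Space has GPU hardware): pass device=0 to
# run the pipeline on GPU instead of CPU:
# translator = pipeline("translation", model=model, tokenizer=tokenizer, device=0)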
# Translate Tamil text to English
def translate_text(tamil_text):
    try:
        # max_length caps the length of the generated translation
        translation = translator(tamil_text, max_length=40)
        return translation[0]['translation_text']
    except Exception as e:
        return f"An error occurred: {str(e)}"
# API credentials and endpoint for FLUX
flux_API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
flux_headers = {"Authorization": f"Bearer {hf_token}"}

# Send the prompt to the Inference API and return the generated image
def generate_image(prompt):
    try:
        response = requests.post(flux_API_URL, headers=flux_headers, json={"inputs": prompt})
        # Check if the response is successful
        if response.status_code == 200:
            print("API call successful, generating image...")
            image_bytes = response.content
            # Try opening the returned bytes as an image
            try:
                return Image.open(io.BytesIO(image_bytes))
            except Exception as e:
                print(f"Error opening image: {e}")
                return None
        else:
            print(f"Failed to get image: Status code {response.status_code}")
            print("Response content:", response.text)  # Print response for debugging
            return None
    except Exception as e:
        print(f"An error occurred: {e}")
        return None
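# Optional hardening (a sketch, not wired into the app): the serverless
# Inference API can answer 503 while the model is still loading. This helper,
# generate_image_with_retry (a hypothetical name), waits and retries a few
# times before giving up.
import time

def generate_image_with_retry(prompt, retries=3, delay=20):
    for attempt in range(retries):
        image = generate_image(prompt)
        if image is not None:
            return image
        print(f"Attempt {attempt + 1} failed; retrying in {delay}s...")
        time.sleep(delay)
    return None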
# API credentials and endpoint for Mistral (creative text generation)
mistral_API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-v0.1"
mistral_headers = {"Authorization": f"Bearer {hf_token}"}

# Generate creative text from the translated prompt
def generate_creative_text(translated_text):
    try:
        response = requests.post(mistral_API_URL, headers=mistral_headers, json={"inputs": translated_text})
        if response.status_code == 200:
            return response.json()[0]['generated_text']
        else:
            print(f"Failed to get creative text: Status code {response.status_code}")
            print("Response content:", response.text)  # Print response for debugging
            return "Error generating creative text"
    except Exception as e:
        print(f"An error occurred: {e}")
        return "Error generating creative text"  # Return a string so the Gradio output stays readable
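# A richer request payload (a sketch with assumed parameter values): the
# Inference API accepts a "parameters" object to control generation and an
# "options" object to wait for a loading model instead of failing:
# payload = {
#     "inputs": translated_text,
#     "parameters": {"max_new_tokens": 100, "temperature": 0.8},
#     "options": {"wait_for_model": True},
# }
# response = requests.post(mistral_API_URL, headers=mistral_headers, json=payload)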
# Handle the full workflow: translate, then generate an image and creative text
def translate_generate_image_and_text(tamil_text):
    # Step 1: Translate Tamil text to English
    translated_text = translate_text(tamil_text)
    # Step 2: Generate an image based on the translated text
    image = generate_image(translated_text)
    # Step 3: Generate creative text based on the translated text
    creative_text = generate_creative_text(translated_text)
    return translated_text, creative_text, image

# Create the Gradio interface
interface = gr.Interface(
    fn=translate_generate_image_and_text,
    inputs="text",
    outputs=["text", "text", "image"],
    title="Tamil to English Translation, Image Generation & Creative Text",
    description="Enter Tamil text to translate to English, generate an image, and create creative text based on the translation.",
)

# Launch the Gradio app
interface.launch()
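# A small polish (a sketch, assuming current Gradio component names): labeled
# components make the three outputs easier to tell apart than bare "text":
# interface = gr.Interface(
#     fn=translate_generate_image_and_text,
#     inputs=gr.Textbox(label="Tamil text"),
#     outputs=[
#         gr.Textbox(label="English translation"),
#         gr.Textbox(label="Creative text"),
#         gr.Image(label="Generated image"),
#     ],
#     title="Tamil to English Translation, Image Generation & Creative Text",
# )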