File size: 2,779 Bytes
414c785
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
import gradio as gr
import requests
import os
import io
from PIL import Image
from typing import Optional


# Load Hugging Face Token from environment variables
def load_environment():
    """Return the Hugging Face API token from the HF_TOKEN env var, or None."""
    token = os.environ.get("HF_TOKEN")
    return token


# Enhance the text prompt for better photorealistic results
def craft_realistic_prompt(base_prompt: str) -> str:
    """Wrap *base_prompt* with photography-style modifiers to bias the
    model toward photorealistic output.

    The result is: "<modifiers>, <base_prompt>, <camera suffix>".
    """
    modifiers = (
        "photorealistic", "high resolution", "sharp focus",
        "professional photography", "natural lighting", "detailed textures",
    )
    prefix = " ".join(modifiers)
    suffix = "shot on professional camera, 8k resolution"
    return f"{prefix}, {base_prompt}, {suffix}"


# Query Hugging Face API to generate image from text
def query_hf_api(prompt: str) -> Optional[bytes]:
    """POST the enhanced prompt to the SDXL inference endpoint and return
    the raw image bytes.

    Raises:
        ValueError: when no HF_TOKEN is configured.
        requests.HTTPError: when the API responds with an error status.
    """
    token = load_environment()
    if token is None or token == "":
        raise ValueError("Hugging Face token not found. Set HF_TOKEN in environment variables.")

    endpoint = (
        "https://api-inference.huggingface.co/models/"
        "stabilityai/stable-diffusion-xl-base-1.0"
    )
    auth_headers = {
        "Authorization": f"Bearer {token}",
        "Content-Type": "application/json",
    }
    request_body = {
        "inputs": craft_realistic_prompt(prompt),
        "parameters": {
            # Steer the model away from stylized / low-quality renders.
            "negative_prompt": ("cartoon, anime, low quality, bad anatomy, "
                                "blurry, unrealistic, painting, drawing, sketch"),
            "num_inference_steps": 75,
            "guidance_scale": 8.5,
        },
    }

    # Generous timeout: SDXL generation routinely takes tens of seconds.
    response = requests.post(endpoint, headers=auth_headers, json=request_body, timeout=120)
    response.raise_for_status()
    return response.content


# Generate image from text and return as PIL image
def generate_image(prompt: str):
    """Gradio callback: turn a text prompt into a PIL image.

    Returns a PIL.Image on success. On failure raises gr.Error so the
    Gradio UI surfaces the message to the user — the previous version
    returned the error *string*, which the wired gr.Image(type="pil")
    output component cannot render.
    """
    try:
        image_bytes = query_hf_api(prompt)
        return Image.open(io.BytesIO(image_bytes)).convert("RGB")
    except Exception as e:
        # Propagate as a UI-visible error instead of handing a str to an
        # Image output; preserve the original "Error: ..." message text.
        raise gr.Error(f"Error: {str(e)}") from e


# Modern Gradio UI using Blocks
# Top-level UI definition. Side effect at import time: builds the Blocks
# layout into the module-level `demo` and wires the button click to
# generate_image.
with gr.Blocks() as demo:
    gr.Markdown("# 🎨 Wangoes Text-to-Image Generator")
    gr.Markdown("Generate stunning AI images from text using Hugging Face models.")

    with gr.Row():
        # Left column: prompt entry plus the trigger button.
        with gr.Column():
            prompt_input = gr.Textbox(label="Enter your prompt",
                                      placeholder="Describe the image you want to generate...")
            generate_button = gr.Button("Generate Image", variant="primary")

        # Right column: result display; type="pil" means the callback is
        # expected to return a PIL.Image object.
        with gr.Column():
            image_output = gr.Image(label="Generated Image", type="pil")

    # Clickable example prompts that populate the textbox when selected.
    gr.Examples(
        examples=[
            "A futuristic cityscape at sunset with flying cars",
            "A serene mountain lake with crystal clear water",
            "A cozy cabin in the woods during winter"
        ],
        inputs=prompt_input
    )

    # Wiring: prompt text in -> generated image out.
    generate_button.click(fn=generate_image, inputs=prompt_input, outputs=image_output)

# Start the local Gradio server only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()