import gradio as gr
import torch
from diffusers import AutoPipelineForText2Image, StableDiffusionPipeline
from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
| |
# Hugging Face model id used for the chat side of the app.
chat_model_id = "Qwen/Qwen2.5-1.5B-Instruct"

# Load the tokenizer and weights once at import time (downloads on first run).
# float32 keeps the model runnable on CPU-only hosts.
tokenizer = AutoTokenizer.from_pretrained(chat_model_id)
chat_model = AutoModelForCausalLM.from_pretrained(chat_model_id, torch_dtype=torch.float32)
|
|
def chat_fn(message, history):
    """Produce an assistant reply for the Gradio ChatInterface.

    Args:
        message: The latest user message (str).
        history: Prior turns as supplied by ``gr.ChatInterface`` — assumed to
            be either ``[user, assistant]`` pairs or ``{"role", "content"}``
            dicts depending on the installed Gradio version; TODO confirm
            against the version pinned for this app.

    Returns:
        Only the newly generated assistant text (prompt tokens stripped).
    """
    # Rebuild the whole conversation so the model sees prior context;
    # the previous implementation dropped `history` entirely, making
    # every turn stateless.
    messages = []
    for turn in history:
        if isinstance(turn, dict):
            messages.append({"role": turn["role"], "content": turn["content"]})
        else:
            user_turn, assistant_turn = turn
            messages.append({"role": "user", "content": user_turn})
            if assistant_turn:
                messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    prompt = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )

    inputs = tokenizer(prompt, return_tensors="pt")
    output = chat_model.generate(
        **inputs,
        max_new_tokens=200,
        do_sample=True,  # without sampling, `temperature` is ignored (greedy)
        temperature=0.7,
    )
    # Decode only the tokens generated after the prompt; decoding output[0]
    # from the start would echo the entire chat template back to the user.
    new_tokens = output[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
|
|
| |
| |
# NOTE: the duplicate `from diffusers import StableDiffusionPipeline` that
# used to live here was redundant with the top-of-file import and is removed.
#
# stabilityai/sdxl-turbo is an SDXL-architecture checkpoint (two text
# encoders), which StableDiffusionPipeline cannot load. AutoPipelineForText2Image
# resolves the correct pipeline class from the repo's model_index.json.
img_pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/sdxl-turbo",
    torch_dtype=torch.float32,  # float32 for CPU inference
)
img_pipe = img_pipe.to("cpu")
|
|
|
|
| |
def generate_image(prompt):
    """Render *prompt* to a single image with the SDXL-Turbo pipeline.

    This was referenced by ``btn.click`` but never defined, so the app
    crashed with NameError at startup.

    Returns the first generated PIL image, suitable for a ``gr.Image``.
    """
    # SDXL-Turbo is a distilled model: very few steps, guidance disabled
    # (guidance_scale=0.0) per the model card's recommended usage.
    result = img_pipe(prompt, num_inference_steps=2, guidance_scale=0.0)
    return result.images[0]


with gr.Blocks(
    title="MaindAI",
    theme=gr.themes.Soft(primary_hue="blue"),
) as app:
    # Logo banner — the PNG is expected next to this script.
    gr.Image(value="Maindai.png", show_label=False, height=140)

    gr.Markdown("<h1 style='text-align:center;color:#4da6ff;'>MaindAI</h1>")

    with gr.Row():
        # Left column: the chat assistant.
        with gr.Column(scale=1):
            gr.ChatInterface(chat_fn)

        # Right column: text-to-image generation.
        with gr.Column(scale=1):
            prompt = gr.Textbox(label="Görsel açıklaması")
            btn = gr.Button("🎨 Oluştur")
            output = gr.Image()
            btn.click(generate_image, prompt, output)


if __name__ == "__main__":
    # Without an explicit launch the script built the UI and exited.
    app.launch()
|
|
|
|