DetectiveShadow committed on
Commit
41c343f
·
verified ·
1 Parent(s): bc11304

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -24
app.py CHANGED
@@ -1,30 +1,25 @@
1
  import gradio as gr
2
- import torch
3
- from diffusers import DiffusionPipeline
4
 
5
- # Load model once
6
- pipe = DiffusionPipeline.from_pretrained(
7
- "stabilityai/stable-diffusion-xl-base-1.0",
8
- torch_dtype=torch.float16,
9
- variant="fp16",
10
- use_safetensors=True
11
- ).to("cuda")
12
 
13
- # Load the LoRA weights
14
- pipe.load_lora_weights("ZB-Tech/Text-to-Image")
 
 
15
 
16
- # Define generation function
17
- def generate_image(prompt):
18
- image = pipe(prompt).images[0]
19
- return image
20
-
21
- # Gradio UI
22
- demo = gr.Interface(
23
- fn=generate_image,
24
- inputs=gr.Textbox(label="Enter a prompt", placeholder="e.g. Draw a picture of some coding on a screen with a horror background"),
25
- outputs=gr.Image(type="pil", label="Generated Image"),
26
- title="🎨 AI Image Generator",
27
- description="This demo uses Stable Diffusion XL with custom LoRA weights to generate images from your prompts."
28
  )
29
 
30
- demo.launch()
 
1
  import gradio as gr
2
+ from transformers import pipeline
 
3
 
4
# Load the text2text-generation pipeline from the Hugging Face Hub.
# NOTE: this downloads the model weights on first run.
MODEL_ID = "DetectiveShadow/inspiration-message-generator"
generator = pipeline("text2text-generation", model=MODEL_ID)
 
 
 
 
 
6
 
7
def generate_message(age, profession, archetype):
    """Generate an inspirational message for the given user profile.

    Args:
        age: Age as entered by the user (free-form text).
        profession: Profession as entered by the user.
        archetype: Archetype as entered by the user.

    Returns:
        The model's generated message text.
    """
    # Serialize the three fields into the "Key: value | ..." prompt shape;
    # presumably this matches the model's training format — TODO confirm.
    prompt = f"Age: {age} | Profession: {profession} | Archetype: {archetype}"
    outputs = generator(prompt, max_new_tokens=100)
    return outputs[0]["generated_text"]
11
 
12
# Gradio UI: three free-text inputs wired to a single text output.
_inputs = [
    gr.Textbox(label="Age"),
    gr.Textbox(label="Profession"),
    gr.Textbox(label="Archetype"),
]

iface = gr.Interface(
    fn=generate_message,
    inputs=_inputs,
    outputs="text",
    title="Inspiration Message Generator",
    description="Enter age, profession, and archetype to get a personalized inspirational message.",
)

iface.launch()