druvx13 commited on
Commit
41b8e2f
·
verified ·
1 Parent(s): aa4347c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -41
app.py CHANGED
@@ -1,57 +1,49 @@
1
  import gradio as gr
2
- import random
3
- from transformers import GPT2LMHeadModel, GPT2Tokenizer
4
- from diffusers import StableDiffusionPipeline
5
  import torch
6
 
7
- # Load models once at startup
8
- tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
9
- text_model = GPT2LMHeadModel.from_pretrained("gpt2").to("cuda" if torch.cuda.is_available() else "cpu")
10
- image_pipe = StableDiffusionPipeline.from_pretrained(
11
- "stabilityai/stable-diffusion-2",
12
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
13
- revision="fp16" if torch.cuda.is_available() else None
14
- ).to("cuda" if torch.cuda.is_available() else "cpu")
15
 
16
- # Random theme generator
17
- themes = ["Surreal Dreamscape", "Cyberpunk Forest", "Alien Cuisine", "Time Travel Paradox", "Quantum Zoo"]
18
-
19
- def generate_random_story(theme, length):
20
- prompt = f"A surreal story about {theme}:"
21
- inputs = tokenizer(prompt, return_tensors="pt").to(text_model.device)
22
- outputs = text_model.generate(
23
- **inputs,
24
- max_length=length,
25
- temperature=0.95,
26
- do_sample=True,
27
- pad_token_id=tokenizer.eos_token_id
28
- )
29
  return tokenizer.decode(outputs[0], skip_special_tokens=True)
30
 
31
- def generate_image(prompt):
32
- with torch.inference_mode():
33
- return image_pipe(prompt, num_inference_steps=20).images[0]
 
34
 
35
  with gr.Blocks(theme="soft") as demo:
36
- gr.Markdown("## 🎲 Random AI Experience Generator\n*Creates weird and wonderful combinations*")
37
 
38
  with gr.Row():
39
- theme = gr.Dropdown(choices=themes, label="Select Theme", value=random.choice(themes))
40
- length = gr.Slider(50, 200, value=100, label="Story Length (words)")
41
-
42
- story_output = gr.Textbox(label="AI-Generated Story")
43
- image_output = gr.Image(label="Visual Interpretation")
44
 
45
- def full_generation(selected_theme):
46
- story = generate_random_story(selected_theme, length.value)
47
- image = generate_image(f"Surrealistic {selected_theme}")
48
- return story, image
49
 
50
- generate_btn = gr.Button("✨ Generate Random Experience")
51
  generate_btn.click(
52
- fn=full_generation,
53
- inputs=[theme],
54
- outputs=[story_output, image_output]
55
  )
56
 
57
  demo.launch()
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
 
 
3
  import torch
4
 
5
# -- Model loading (module-level, runs once at startup) ----------------------
# NOTE(review): microsoft/phi-1_5 is a ~1.3B-parameter model, not 24M as the
# original comment claimed. It is still small enough for a single modest GPU.
model_name = "microsoft/phi-1_5"

# Tokenizer and model are module-level singletons shared by every request.
tokenizer = AutoTokenizer.from_pretrained(model_name)

# float16 halves memory on GPU, but half-precision generation is poorly
# supported on CPU — fall back to float32 when no CUDA device is available
# (the previous revision of this file used the same guard).
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    device_map="auto",
)
13
 
14
def generate(prompt):
    """Generate a short sampled completion for *prompt* with the loaded model.

    Parameters
    ----------
    prompt : str
        Free-form user text; tokenized as-is.

    Returns
    -------
    str
        The decoded sequence — the original prompt followed by up to 30
        newly sampled tokens, with special tokens stripped.
    """
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # inference_mode disables autograd bookkeeping — cheaper than no_grad.
    with torch.inference_mode():
        outputs = model.generate(
            **inputs,
            max_new_tokens=30,
            temperature=0.7,
            do_sample=True,
            # Causal LMs like Phi define no pad token; without this,
            # generate() warns and picks a fallback. Reuse EOS, as the
            # previous revision of this file did.
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
24
 
25
# -- UI ----------------------------------------------------------------------
emoji = "🔮"
title = "Tiny Fortune Cookie Generator"
# Corrected parameter count: microsoft/phi-1_5 has ~1.3B parameters, not 24M.
description = "Uses Microsoft Phi-1.5 (1.3B params) to generate micro-wisdom"

with gr.Blocks(theme="soft") as demo:
    gr.Markdown(f"## {emoji} {title}\n{description}")

    with gr.Row():
        prompt_input = gr.Textbox(
            value="What's my fortune today?",
            label="Input Prompt",
            placeholder="Ask your question...",
        )

    output_text = gr.Textbox(label="Your Fortune")
    generate_btn = gr.Button("Open Cookie 🍪", variant="primary")

    # Wire the button to the generation function defined above.
    generate_btn.click(
        fn=generate,
        inputs=prompt_input,
        outputs=output_text,
    )

demo.launch()