Mohansai2004 commited on
Commit
e7dde2b
·
1 Parent(s): 7d035ab

feat: optimize image generator for CPU performance

Browse files

- Switch to RunwayML SD v1.5 model
- Optimize CPU settings and memory usage
- Simplify user interface
- Reduce generation time to 1-2 minutes
- Fix CPU offloading issues

Files changed (3) hide show
  1. README.md +14 -14
  2. app.py +43 -50
  3. requirements.txt +3 -8
README.md CHANGED
@@ -1,25 +1,25 @@
1
  ---
2
- title: Fast Image Generator
3
- emoji: 🎨
4
  colorFrom: blue
5
- colorTo: pink
6
  sdk: streamlit
7
  sdk_version: 1.41.1
8
  app_file: app.py
9
  pinned: false
10
- short_description: CPU-optimized image generation using SD-v1.4
11
  ---
12
 
13
- # Fast Image Generator
14
- Quick image generation optimized for CPU using Stable Diffusion v1.4
15
 
16
  ## Features
17
- - Optimized for CPU usage
18
- - Fast generation (2-5 minutes)
19
- - Memory efficient
20
- - Stable performance
21
 
22
- ## Recommended Settings
23
- - Resolution: 384x384 for best speed/quality ratio
24
- - Steps: 30-50 for good quality
25
- - Use clear, simple prompts
 
1
  ---
2
+ title: AI Text Generator
3
+ emoji: 💬
4
  colorFrom: blue
5
+ colorTo: green
6
  sdk: streamlit
7
  sdk_version: 1.41.1
8
  app_file: app.py
9
  pinned: false
10
+ short_description: Fast text generation using DistilGPT-2
11
  ---
12
 
13
+ # AI Text Generator
14
+ Quick text generation using DistilGPT-2
15
 
16
  ## Features
17
+ - Fast text generation
18
+ - CPU-optimized performance
19
+ - Adjustable creativity settings
20
+ - Memory efficient (< 2GB)
21
 
22
+ ## Usage Tips
23
+ - Clear, specific prompts work best
24
+ - Adjust temperature for different styles
25
+ - Experiment with prompt formats
app.py CHANGED
@@ -1,80 +1,73 @@
1
  import streamlit as st
2
- from diffusers import StableDiffusionPipeline
3
  import torch
4
  import gc
5
 
6
  @st.cache_resource
7
  def load_model():
8
- # Explicitly disable CUDA
9
- torch.cuda.is_available = lambda : False
10
-
11
- # Use RunwayML's smaller model
12
- model_id = "runwayml/stable-diffusion-v1-5"
13
- pipe = StableDiffusionPipeline.from_pretrained(
14
  model_id,
15
  torch_dtype=torch.float32,
16
- safety_checker=None,
17
- requires_safety_checker=False,
18
- use_safetensors=True
19
- )
20
 
21
- # Basic CPU optimizations
22
- pipe = pipe.to("cpu")
23
- pipe.enable_attention_slicing(slice_size=1)
24
- pipe.enable_vae_tiling()
25
  torch.set_num_threads(4)
26
-
27
- # Clear memory
28
  gc.collect()
29
 
30
- return pipe
31
 
32
- st.title("🎨 AI Image Generator")
33
- st.write("Create images from text descriptions")
34
 
35
  # Initialize model
36
- pipeline = load_model()
37
 
38
- # Simple interface
39
  prompt = st.text_area(
40
- "Describe your image:",
41
- "A professional photograph of a serene landscape at sunset, golden hour lighting, highly detailed"
42
  )
43
 
44
- if st.button("Generate Image"):
45
- with st.spinner("Creating your image... (1-2 minutes)"):
 
 
 
 
 
46
  try:
47
- gc.collect()
 
48
 
49
  with torch.inference_mode():
50
- image = pipeline(
51
- prompt=prompt,
52
- negative_prompt="ugly, blurry, low quality, distorted, disfigured",
53
- num_inference_steps=15, # Reduced for speed
54
- guidance_scale=7.0,
55
- width=256, # Reduced for memory
56
- height=256, # Reduced for memory
57
- ).images[0]
58
 
59
- # Display and download
60
- st.image(image, caption=prompt, use_column_width=True)
 
 
61
 
62
- # Save and offer download
63
- image.save("generated_image.png")
64
- with open("generated_image.png", "rb") as file:
65
- st.download_button(
66
- label="Download Image",
67
- data=file,
68
- file_name="generated_image.png",
69
- mime="image/png"
70
- )
71
 
72
  except Exception as e:
73
  st.error(f"Error: {str(e)}")
74
 
75
  st.markdown("""
76
- ### Tips for good results:
77
- - Be specific in your description
78
- - Include details about style and lighting
79
- - Keep descriptions concise
80
  """)
 
1
  import streamlit as st
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
  import gc
5
 
6
  @st.cache_resource
7
  def load_model():
8
+ # Load DistilGPT-2
9
+ model_id = "distilgpt2"
10
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
11
+ model = AutoModelForCausalLM.from_pretrained(
 
 
12
  model_id,
13
  torch_dtype=torch.float32,
14
+ low_cpu_mem_usage=True
15
+ ).to("cpu")
 
 
16
 
17
+ # Set threading and memory optimizations
 
 
 
18
  torch.set_num_threads(4)
 
 
19
  gc.collect()
20
 
21
+ return model, tokenizer
22
 
23
+ st.title("💬 AI Text Generator")
24
+ st.write("Generate creative text using DistilGPT-2")
25
 
26
  # Initialize model
27
+ model, tokenizer = load_model()
28
 
29
+ # User input
30
  prompt = st.text_area(
31
+ "Enter your prompt:",
32
+ "Once upon a time in a digital world,"
33
  )
34
 
35
+ # Generation settings
36
+ with st.sidebar:
37
+ max_length = st.slider("Max Length", 50, 200, 100)
38
+ temperature = st.slider("Temperature", 0.1, 1.0, 0.7)
39
+
40
+ if st.button("Generate Text"):
41
+ with st.spinner("Generating text..."):
42
  try:
43
+ # Tokenize and generate
44
+ inputs = tokenizer(prompt, return_tensors="pt")
45
 
46
  with torch.inference_mode():
47
+ outputs = model.generate(
48
+ inputs["input_ids"],
49
+ max_length=max_length,
50
+ temperature=temperature,
51
+ num_return_sequences=1,
52
+ pad_token_id=tokenizer.eos_token_id,
53
+ do_sample=True,
54
+ )
55
 
56
+ # Decode and display
57
+ generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
58
+ st.write("### Generated Text:")
59
+ st.write(generated_text)
60
 
61
+ # Add copy button
62
+ st.button("📋 Copy Text",
63
+ on_click=lambda: st.write(generated_text))
 
 
 
 
 
 
64
 
65
  except Exception as e:
66
  st.error(f"Error: {str(e)}")
67
 
68
  st.markdown("""
69
+ ### Tips for better results:
70
+ - Start with clear, well-structured prompts
71
+ - Adjust temperature for creativity (higher) or consistency (lower)
72
+ - Try different prompt styles for different outputs
73
  """)
requirements.txt CHANGED
@@ -1,11 +1,6 @@
1
  # Add any additional dependencies here
2
  # streamlit is already pre-installed
3
- pandas
4
- numpy
5
- transformers
6
  torch
7
- accelerate
8
- einops
9
- diffusers
10
- safetensors
11
- scipy
 
1
  # Add any additional dependencies here
2
  # streamlit is already pre-installed
3
+ streamlit
 
 
4
  torch
5
+ transformers
6
+ accelerate