Dua Rajper committed on
Commit
936c3d7
·
verified ·
1 Parent(s): 8e1b005

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -25
app.py CHANGED
@@ -1,30 +1,33 @@
import streamlit as st
from diffusers import DiffusionPipeline
import torch

# Pick the best available device once, at module load.
# FIX: the original called pipe.to("cuda") unconditionally, which raises a
# RuntimeError on CPU-only hosts (e.g. free Spaces hardware).
device = "cuda" if torch.cuda.is_available() else "cpu"


# Load the pre-trained model
@st.cache_resource
def load_model():
    """Load and cache the Stable Diffusion 2.1 pipeline for the app's lifetime.

    Returns:
        DiffusionPipeline: the pipeline, moved to the detected device.
    """
    # float16 halves GPU memory; on CPU half precision is unsupported/slow,
    # so fall back to float32 there.
    dtype = torch.float16 if device == "cuda" else torch.float32
    # Load the Stable Diffusion model from Hugging Face
    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-1", torch_dtype=dtype
    )
    pipe.to(device)
    return pipe


# Initialize the model
pipe = load_model()

# Streamlit Interface
st.title("Text to Image Generator using Stable Diffusion 2.1")
st.write("Enter a description below, and the model will generate an image based on it.")

# Text input field for user to enter prompt
user_input = st.text_area("Enter the text prompt", "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k")

if st.button("Generate Image"):
    if user_input:
        with st.spinner("Generating image..."):
            # Generate image based on user input
            generated_image = pipe(user_input).images[0]
            st.image(generated_image)
    else:
        st.error("Please enter a text prompt!")
 
 
 
1
  import streamlit as st
2
+ from transformers import pipeline
3
+ from diffusers import StableDiffusionPipeline
4
  import torch
5
 
6
# Load the pre-trained Stable Diffusion model
@st.cache_resource
def load_model():
    """Download (on first run) and return a CPU-bound Stable Diffusion pipeline.

    st.cache_resource keeps one pipeline instance alive across Streamlit
    reruns, so the weights are fetched and loaded only once.

    Returns:
        StableDiffusionPipeline: pipeline ready for CPU inference.
    """
    # FIX: "CompVis/stable-diffusion-v-1-4-original" hosts only the raw .ckpt
    # weights and cannot be loaded with from_pretrained(); the diffusers-format
    # repo is "CompVis/stable-diffusion-v1-4".
    model = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
    model.to("cpu")  # Ensure it uses CPU for inference
    return model
13
# Initialize Streamlit components
st.title("Text-to-Image Generator")

st.write("Enter a description and generate an image!")

# User input for text prompt; empty string (no input yet) is falsy, so
# generation only runs once the user has typed something.
prompt = st.text_input("Enter your text prompt here:")

if prompt:
    st.write("Generating image... Please wait.")

    # Load model once (cached by @st.cache_resource, so reruns are cheap)
    model = load_model()

    # Generate the image from the text prompt. no_grad() skips building an
    # autograd graph that inference would never use, saving memory.
    with torch.no_grad():
        image = model(prompt).images[0]

    # Show the generated image in the app.
    # FIX: use_column_width is deprecated (and removed in recent Streamlit);
    # use_container_width is the supported replacement with the same effect.
    st.image(image, caption="Generated Image", use_container_width=True)