# cosmos / app.py — Streamlit front-end for FLUX.1 image generation
# (uploaded by njavidfar, commit 15621be, verified)
import os

# Streamlit reads server options from STREAMLIT_<SECTION>_<OPTION> environment
# variables; they are set here, before streamlit is imported, so the server
# picks them up. Port 7860 / address 0.0.0.0 are the Hugging Face Spaces
# requirements for a publicly reachable app.
os.environ["STREAMLIT_SERVER_PORT"] = "7860"
os.environ["STREAMLIT_SERVER_ADDRESS"] = "0.0.0.0"
import streamlit as st
import torch
from diffusers import FluxPipeline

# Set page config (must be the first Streamlit call in the script)
st.set_page_config(
    page_title="FLUX.1 Image Generator",
    page_icon="🎨",
    layout="wide"
)
@st.cache_resource
def load_pipeline():
    """Load the FLUX.1 [dev] diffusion pipeline onto the CPU.

    Cached with ``st.cache_resource`` so the (very large) model weights are
    downloaded and instantiated only once per server process, not on every
    Streamlit rerun.

    Returns:
        FluxPipeline: the pipeline, moved to the CPU device.
    """
    # float32 rather than a half precision dtype: this Space has no GPU, and
    # CPU kernels for fp16/bf16 are not reliably available.
    pipeline = FluxPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-dev",
        torch_dtype=torch.float32  # Using float32 for CPU compatibility
    )
    return pipeline.to("cpu")
def main():
    """Render the Streamlit UI and run FLUX.1 text-to-image generation.

    Collects a prompt plus size/guidance/step settings, runs the cached
    pipeline on CPU, shows the result, and offers a PNG download.

    Fix over the previous revision: the generated image is kept in an
    in-memory buffer instead of being written to the fixed on-disk path
    ``generated_image.png``. The old code let two concurrent sessions
    clobber each other's file, and it called ``os.remove`` immediately
    after creating the download button, so a rerun could reference a
    file that no longer existed.
    """
    import io  # stdlib; local import keeps the top-of-file block untouched

    st.title("🎨 FLUX.1 Image Generator")
    st.write("Create high-quality images using FLUX.1 [dev] model")

    # User inputs
    prompt = st.text_area("Enter your image description:", height=100)

    col1, col2, col3 = st.columns(3)
    with col1:
        # Discrete sizes only: FLUX works best at multiples of 256.
        width = st.select_slider(
            "Image Width",
            options=[512, 768, 1024],
            value=768
        )
    with col2:
        height = st.select_slider(
            "Image Height",
            options=[512, 768, 1024],
            value=768
        )
    with col3:
        guidance_scale = st.slider(
            "Guidance Scale",
            min_value=1.0,
            max_value=7.0,
            value=3.5,
            step=0.5
        )

    steps = st.slider(
        "Number of Steps",
        min_value=20,
        max_value=50,
        value=30
    )

    if st.button("Generate Image"):
        if prompt:
            try:
                pipe = load_pipeline()
                with st.spinner("🎨 Generating your image... (this may take several minutes on CPU)"):
                    # Set a fixed seed for reproducibility
                    generator = torch.Generator("cpu").manual_seed(42)
                    image = pipe(
                        prompt,
                        height=height,
                        width=width,
                        guidance_scale=guidance_scale,
                        num_inference_steps=steps,
                        max_sequence_length=512,
                        generator=generator
                    ).images[0]

                # Display image
                st.image(image, caption="Generated Image")

                # Provide download option from an in-memory PNG buffer:
                # no shared temp file on disk, so concurrent sessions
                # cannot interfere with one another.
                png_buffer = io.BytesIO()
                image.save(png_buffer, format="PNG")
                png_buffer.seek(0)
                st.download_button(
                    label="Download Image",
                    data=png_buffer,
                    file_name="flux_generated.png",
                    mime="image/png"
                )
            except Exception as e:
                # Broad catch is deliberate at this UI boundary: surface
                # any pipeline/IO failure to the user instead of crashing.
                st.error(f"An error occurred: {str(e)}")
        else:
            st.warning("Please enter a prompt first!")

    st.markdown("---")
    st.markdown("Powered by FLUX.1 [dev] model")
    st.info("⚠️ Note: Generation may take several minutes as this runs on CPU")

    # Add usage guidelines
    with st.expander("Usage Guidelines"):
        st.markdown("""
    **Limitations:**
    - This model is not intended to provide factual information
    - May amplify existing societal biases
    - May fail to generate output that matches prompts
    - Prompt following depends on prompting-style

    **Not allowed:**
    - Generating illegal or harmful content
    - Creating non-consensual content
    - Harassing or bullying individuals
    - Generating disinformation
    """)
# Run the app only when executed as a script (not when imported as a module).
if __name__ == "__main__":
    main()