# app.py — Streamlit blog & image generator
# (Hugging Face Space "v1" by vishash; revision de6656f, commit "Update app.py")
import os
import streamlit as st
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import torch
from diffusers import StableDiffusionPipeline
# Read the Hugging Face access token from the environment.
# NOTE(review): the env var is named "stablediffussion" (sic) — keep this in
# sync with the secret configured on the hosting platform.
HF_TOKEN = os.environ.get("stablediffussion")

# Fail fast with a visible UI error when no token was provided
# (`not HF_TOKEN` covers both an unset and an empty variable).
if not HF_TOKEN:
    st.error("Hugging Face token is missing. Please set it in your environment variables.")
    st.stop()
# Text model (Llama-2 chat) — cached so weights load once per server process.
@st.cache_resource
def load_text_model():
    """Build and cache a CPU-only text-generation pipeline.

    Returns a ``transformers`` text-generation pipeline wrapping
    meta-llama/Llama-2-7b-chat-hf; Streamlit's resource cache ensures the
    (large) weights are only loaded on the first call.
    """
    repo_id = "meta-llama/Llama-2-7b-chat-hf"
    llm = AutoModelForCausalLM.from_pretrained(
        repo_id,
        device_map="cpu",           # keep all weights on the CPU
        torch_dtype=torch.float32,  # full precision: CPUs lack fast fp16 kernels
        token=HF_TOKEN,
    )
    tok = AutoTokenizer.from_pretrained(repo_id, token=HF_TOKEN)
    # device=-1 pins the pipeline itself to CPU as well.
    return pipeline("text-generation", model=llm, tokenizer=tok, device=-1)
# Image model (Stable Diffusion v1.5) — cached like the text model.
@st.cache_resource
def load_image_model():
    """Build and cache a CPU-hosted Stable Diffusion v1.5 pipeline."""
    sd = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        torch_dtype=torch.float32,  # CPU inference => full precision
        token=HF_TOKEN,
    )
    # No GPU available in this deployment; keep everything on the CPU.
    return sd.to("cpu")
# Load models (cheap after the first run thanks to st.cache_resource).
llama_pipe = load_text_model()
image_pipe = load_image_model()

# --- Streamlit UI ---
st.title("🚀 Blog & Image Generator")  # emoji repaired: was mojibake in the source
topic = st.text_input("Enter a blog topic:", "The Future of AI in Healthcare")

if st.button("Generate Blog"):
    prompt = f"Write a detailed blog on {topic}:"

    with st.spinner("Generating Blog..."):
        # max_new_tokens bounds only the *generated* text. The previous
        # max_length=800 also counted the prompt's tokens, so longer topics
        # silently produced shorter blogs.
        response = llama_pipe(prompt, max_new_tokens=800, truncation=True)

    blog = response[0]["generated_text"]
    # text-generation pipelines echo the prompt at the start of
    # generated_text; strip it so the instruction is not shown as blog text.
    if blog.startswith(prompt):
        blog = blog[len(prompt):].lstrip()

    st.subheader("Generated Blog")
    st.write(blog)

    # Use the first 3 lines of the blog as the image prompt (Stable
    # Diffusion's CLIP text encoder truncates long prompts anyway).
    keywords = " ".join(blog.split("\n")[:3])

    with st.spinner("Generating Image..."):
        image = image_pipe(keywords).images[0]

    st.subheader("Generated Image")
    # use_container_width replaces the deprecated use_column_width argument.
    st.image(image, caption="AI-Generated Image", use_container_width=True)