import os
import streamlit as st
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import torch
from diffusers import StableDiffusionPipeline
# Pull the Hugging Face access token from the environment.
# NOTE(review): the variable is named "stablediffussion" (sic) — confirm this
# matches the deployment configuration before renaming it.
HF_TOKEN = os.environ.get("stablediffussion")

# Without a token the gated models below cannot be downloaded, so fail fast.
if not HF_TOKEN:
    st.error("Hugging Face token is missing. Please set it in your environment variables.")
    st.stop()
# Text model: Llama-2 chat, loaded once and cached by Streamlit.
@st.cache_resource
def load_text_model():
    """Build and return a CPU-only text-generation pipeline.

    Uses meta-llama/Llama-2-7b-chat-hf with float32 weights (half precision
    is not CPU-friendly). Cached via st.cache_resource so the model is
    loaded at most once per server process.
    """
    repo_id = "meta-llama/Llama-2-7b-chat-hf"
    tok = AutoTokenizer.from_pretrained(repo_id, token=HF_TOKEN)
    lm = AutoModelForCausalLM.from_pretrained(
        repo_id,
        device_map="cpu",           # keep every weight on the CPU
        torch_dtype=torch.float32,  # float32 for CPU inference
        token=HF_TOKEN,
    )
    # device=-1 pins the generation pipeline to CPU as well.
    return pipeline("text-generation", model=lm, tokenizer=tok, device=-1)
# Image model: Stable Diffusion v1.5, loaded once and cached by Streamlit.
@st.cache_resource
def load_image_model():
    """Build and return a CPU-only Stable Diffusion pipeline.

    Loads runwayml/stable-diffusion-v1-5 with float32 weights and moves it
    to the CPU. Cached via st.cache_resource so it is created only once.
    """
    sd_pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        torch_dtype=torch.float32,
        token=HF_TOKEN,
    )
    # No GPU assumed in this deployment — run inference on the CPU.
    return sd_pipe.to("cpu")
# Load both models up front (each call is cached by st.cache_resource).
llama_pipe = load_text_model()
image_pipe = load_image_model()

# ---- Streamlit UI ----
st.title("🚀 Blog & Image Generator")

topic = st.text_input("Enter a blog topic:", "The Future of AI in Healthcare")

if st.button("Generate Blog"):
    with st.spinner("Generating Blog..."):
        # NOTE: max_length bounds prompt + continuation together, so the
        # generated text is somewhat shorter than 800 tokens.
        response = llama_pipe(f"Write a detailed blog on {topic}:", max_length=800, truncation=True)
        blog = response[0]['generated_text']

    st.subheader("Generated Blog")
    st.write(blog)

    # Use the blog's first three lines as the image prompt.
    # NOTE(review): generated_text echoes the input prompt, so the prompt is
    # part of these keywords — confirm that is the intended image prompt.
    keywords = " ".join(blog.split("\n")[:3])

    with st.spinner("Generating Image..."):
        image = image_pipe(keywords).images[0]

    st.subheader("Generated Image")
    # Fix: use_column_width is deprecated and removed in recent Streamlit
    # releases; use_container_width is the supported replacement.
    st.image(image, caption="AI-Generated Image", use_container_width=True)