|
|
import os |
|
|
import streamlit as st |
|
|
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer |
|
|
import torch |
|
|
from diffusers import StableDiffusionPipeline |
|
|
|
|
|
|
|
|
# Hugging Face access token, read from the "stablediffussion" environment
# variable. NOTE(review): the variable name looks like a typo of
# "stablediffusion" — confirm against the deployment config before renaming,
# since renaming here would break existing environments.
HF_TOKEN = os.environ.get("stablediffussion")


# Fail fast: both models below are gated/downloaded from the Hub and cannot
# be fetched without a token, so stop the Streamlit script immediately.
if not HF_TOKEN:
    st.error("Hugging Face token is missing. Please set it in your environment variables.")
    st.stop()
|
|
|
|
|
|
|
|
@st.cache_resource
def load_text_model():
    """Load Llama-2-7B-chat and wrap it in a text-generation pipeline.

    Returns:
        transformers.Pipeline: a CPU-bound ``"text-generation"`` pipeline.

    Cached via ``st.cache_resource`` so the (large) model is loaded only
    once per Streamlit server process, not on every script rerun.
    """
    model_name = "meta-llama/Llama-2-7b-chat-hf"

    tokenizer = AutoTokenizer.from_pretrained(model_name, token=HF_TOKEN)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        device_map="cpu",           # keep all weights on CPU (no GPU assumed)
        torch_dtype=torch.float32,  # full precision; CPU has poor fp16 support
        token=HF_TOKEN,
    )

    # Do NOT also pass `device=` here: the model was loaded with a
    # `device_map` (accelerate code path), and transformers warns/raises when
    # both are supplied. The pipeline infers the device from the model itself.
    return pipeline("text-generation", model=model, tokenizer=tokenizer)
|
|
|
|
|
|
|
|
@st.cache_resource
def load_image_model():
    """Build the Stable Diffusion v1.5 image pipeline on CPU.

    Returns the pipeline object; cached by Streamlit so the weights are
    downloaded and loaded only once per server process.
    """
    sd_pipeline = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        torch_dtype=torch.float32,
        token=HF_TOKEN,
    )
    # DiffusionPipeline.to() returns the pipeline itself, so move-and-return
    # in one step.
    return sd_pipeline.to("cpu")
|
|
|
|
|
|
|
|
# Eagerly load both models at startup. Thanks to st.cache_resource these
# calls are cheap on every rerun after the first.
llama_pipe = load_text_model()
image_pipe = load_image_model()
|
|
|
|
|
|
|
|
# --- UI: blog + image generation flow -------------------------------------
# NOTE(review): the leading "๐" in the title looks like a mojibake'd emoji
# from a bad encoding round-trip — confirm the intended glyph before fixing.
st.title("๐ Blog & Image Generator")

topic = st.text_input("Enter a blog topic:", "The Future of AI in Healthcare")

if st.button("Generate Blog"):
    with st.spinner("Generating Blog..."):
        # max_length counts prompt + generated tokens; truncation guards
        # against inputs exceeding the model's context window.
        response = llama_pipe(f"Write a detailed blog on {topic}:", max_length=800, truncation=True)
        # generated_text includes the prompt followed by the completion.
        blog = response[0]['generated_text']

        st.subheader("Generated Blog")
        st.write(blog)

    # Use the first three lines of the blog (which include the prompt text)
    # as a rough prompt for the image model.
    keywords = " ".join(blog.split("\n")[:3])

    with st.spinner("Generating Image..."):
        image = image_pipe(keywords).images[0]

        st.subheader("Generated Image")
        # use_column_width is deprecated (and removed in current Streamlit);
        # use_container_width is the supported replacement with the same effect.
        st.image(image, caption="AI-Generated Image", use_container_width=True)
|
|
|