Update app.py
app.py
CHANGED
@@ -3,29 +3,39 @@ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
 import torch
 from diffusers import StableDiffusionPipeline
 
+
 # Load the text model
 @st.cache_resource
 def load_text_model():
     model_name = "meta-llama/Llama-2-7b-chat-hf"
+
     bnb_config = BitsAndBytesConfig(
         load_in_4bit=True,
         bnb_4bit_compute_dtype=torch.float16,
         bnb_4bit_use_double_quant=True
     )
-
+
+    tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=HF_TOKEN)
     model = AutoModelForCausalLM.from_pretrained(
         model_name,
+        load_in_4bit=True,
         quantization_config=bnb_config,
-        device_map="auto"
+        device_map="auto",
+        use_auth_token=HF_TOKEN
     )
+
     return pipeline("text-generation", model=model, tokenizer=tokenizer)
 
 # Load the image model
 @st.cache_resource
 def load_image_model():
     model_id = "runwayml/stable-diffusion-v1-5"
-
+
+    pipe = StableDiffusionPipeline.from_pretrained(
+        model_id, torch_dtype=torch.float16, use_auth_token=HF_TOKEN
+    )
     pipe.to("cuda") # Use GPU
+
     return pipe
 
 llama_pipe = load_text_model()
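The added lines authenticate against the Hugging Face Hub through an HF_TOKEN that is not defined in either hunk, so it is presumably set earlier in app.py. A minimal sketch of one way to provide it, assuming the token lives in Streamlit secrets with an environment-variable fallback (both lookup names are illustrative, not taken from this commit):

import os
import streamlit as st

# Assumed setup, not part of this commit: read the Hub access token from
# Streamlit secrets, falling back to an environment variable.
HF_TOKEN = st.secrets.get("HF_TOKEN", os.environ.get("HF_TOKEN"))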
@@ -50,4 +60,3 @@ if st.button("Generate Blog"):
     image = image_pipe(keywords).images[0]
     st.subheader("Generated Image")
     st.image(image, caption="AI-Generated Image", use_column_width=True)
-
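Two version-dependent caveats about the new from_pretrained calls, both contingent on which transformers and diffusers releases the app pins. First, the text model now requests 4-bit loading twice, once inside bnb_config and once as the direct load_in_4bit keyword; recent transformers versions treat that combination as redundant and may reject it outright, so the config alone is enough. Second, newer releases of both libraries deprecate use_auth_token in favor of token. A sketch of the equivalent call under those assumptions:

# Assumed newer-API variant: quantization is driven solely by bnb_config,
# and the deprecated use_auth_token keyword becomes token.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
    token=HF_TOKEN
)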
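Both loaders also hard-code a GPU: pipe.to("cuda") will fail if CUDA is unavailable, and 4-bit bitsandbytes quantization itself requires one. A defensive variant for the image pipeline, purely illustrative rather than part of the commit:

import torch

# Hypothetical guard: fall back to CPU when no GPU is present
# (very slow for Stable Diffusion, but avoids a hard crash at startup).
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = pipe.to(device)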