change app to chat application to keep GPU
app.py CHANGED
@@ -1,156 +1,82 @@
 import spaces
 import gradio as gr
 import torch
-from diffusers import AutoPipelineForInpainting, LCMScheduler
-from diffusers import DiffusionPipeline
-from PIL import Image, ImageEnhance
-import io
-
-
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
+from threading import Thread
+
+
+torch.set_default_device("cuda")
+
+# Loading the tokenizer and model from Hugging Face's model hub.
+tokenizer = AutoTokenizer.from_pretrained(
+    "macadeliccc/SOLAR-math-2x10.7b",
+    trust_remote_code=True
+)
+model = AutoModelForCausalLM.from_pretrained(
+    "macadeliccc/SOLAR-math-2x10.7b",
+    torch_dtype="auto",
+    load_in_8bit=True,
+    trust_remote_code=True
+)
+
+# Defining a custom stopping criteria class for the model's text generation.
+class StopOnTokens(StoppingCriteria):
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
+        stop_ids = [50256, 50295]  # IDs of tokens where the generation should stop.
+        for stop_id in stop_ids:
+            if input_ids[0][-1] == stop_id:  # Checking if the last generated token is a stop token.
+                return True
+        return False
+
+
+# Function to generate model predictions.
 @spaces.GPU
-def generate_image(prompt, num_inference_steps, guidance_scale, init_image=None, mask_image=None):
-    ...
-    pipe = AutoPipelineForInpainting.from_pretrained(
"diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
|
| 31 |
-
torch_dtype=torch.float32,
|
| 32 |
-
variant="fp16",
|
| 33 |
-
).to("cuda")
|
| 34 |
-
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
|
| 35 |
-
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
|
| 36 |
-
pipe.fuse_lora()
|
| 37 |
-
|
| 38 |
-
if init_image is not None:
|
| 39 |
-
init_image_path = init_image.name # Get the file path
|
| 40 |
-
init_image = Image.open(init_image_path).resize((1024, 1024))
|
| 41 |
-
else:
|
| 42 |
-
raise ValueError("Initial image not provided or invalid")
|
| 43 |
-
|
| 44 |
-
if mask_image is not None:
|
| 45 |
-
mask_image_path = mask_image.name # Get the file path
|
| 46 |
-
mask_image = Image.open(mask_image_path).resize((1024, 1024))
|
| 47 |
-
else:
|
| 48 |
-
raise ValueError("Mask image not provided or invalid")
|
| 49 |
-
|
| 50 |
-
# Generate the inpainted image
|
| 51 |
-
generator = torch.manual_seed(42)
|
| 52 |
-
image = pipe(
|
| 53 |
-
prompt=prompt,
|
| 54 |
-
image=init_image,
|
| 55 |
-
mask_image=mask_image,
|
| 56 |
-
generator=generator,
|
| 57 |
-
num_inference_steps=num_inference_steps,
|
| 58 |
-
guidance_scale=guidance_scale,
|
| 59 |
-
).images[0]
|
| 60 |
-
|
| 61 |
-
return image
|
| 62 |
-
|
| 63 |
-
def generate_image_with_adapter(prompt, num_inference_steps, guidance_scale):
|
| 64 |
-
pipe = DiffusionPipeline.from_pretrained(
|
| 65 |
-
"stabilityai/stable-diffusion-xl-base-1.0",
|
| 66 |
-
variant="fp16",
|
| 67 |
-
torch_dtype=torch.float32
|
| 68 |
-
).to("cuda")
|
| 69 |
-
|
| 70 |
-
# set scheduler
|
| 71 |
-
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
|
| 72 |
-
|
| 73 |
-
# Load and fuse lcm lora
|
| 74 |
-
pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl", adapter_name="lcm")
|
| 75 |
-
pipe.load_lora_weights("TheLastBen/Papercut_SDXL", weight_name="papercut.safetensors", adapter_name="papercut")
|
| 76 |
-
|
| 77 |
-
# Combine LoRAs
|
| 78 |
-
pipe.set_adapters(["lcm", "papercut"], adapter_weights=[1.0, 0.8])
|
| 79 |
-
pipe.fuse_lora()
|
| 80 |
-
generator = torch.manual_seed(0)
|
| 81 |
-
# Generate the image
|
| 82 |
-
image = pipe(prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=generator).images[0]
|
| 83 |
-
pipe.unfuse_lora()
|
| 84 |
-
return image
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
def modify_image(image, brightness, contrast):
|
| 88 |
-
# Function to modify brightness and contrast
|
| 89 |
-
image = Image.open(io.BytesIO(image))
|
| 90 |
-
enhancer = ImageEnhance.Brightness(image)
|
| 91 |
-
image = enhancer.enhance(brightness)
|
| 92 |
-
enhancer = ImageEnhance.Contrast(image)
|
| 93 |
-
image = enhancer.enhance(contrast)
|
| 94 |
-
return image
|
| 95 |
-
|
| 96 |
-
with gr.Blocks(gr.themes.Soft()) as demo:
|
| 97 |
-
with gr.Row():
|
| 98 |
-
gr.Markdown("## Latent Consistency for Diffusion Models")
|
| 99 |
-
gr.Markdown("Run this demo on your own machine if you would like: ```docker run -it -p 7860:7860 --platform=linux/amd64 --gpus all \
|
| 100 |
-
registry.hf.space/macadeliccc-lcm-papercut-demo:latest python app.py```")
|
| 101 |
-
with gr.Row():
|
| 102 |
-
image_output = gr.Image(label="Generated Image")
|
| 103 |
-
|
| 104 |
-
with gr.Row():
|
| 105 |
-
with gr.Accordion(label="Configuration Options"):
|
| 106 |
-
prompt_input = gr.Textbox(label="Prompt", placeholder="Self-portrait oil painting, a beautiful cyborg with golden hair, 8k")
|
| 107 |
-
steps_input = gr.Slider(minimum=1, maximum=10, label="Inference Steps", value=4)
|
| 108 |
-
guidance_input = gr.Slider(minimum=0, maximum=2, label="Guidance Scale", value=1)
|
| 109 |
-
generate_button = gr.Button("Generate Image")
|
| 110 |
-
with gr.Row():
|
| 111 |
-
with gr.Accordion(label="Papercut Image Generation"):
|
| 112 |
-
adapter_prompt_input = gr.Textbox(label="Prompt", placeholder="papercut, a cute fox")
|
| 113 |
-
adapter_steps_input = gr.Slider(minimum=1, maximum=10, label="Inference Steps", value=4)
|
| 114 |
-
adapter_guidance_input = gr.Slider(minimum=0, maximum=2, label="Guidance Scale", value=1)
|
| 115 |
-
adapter_generate_button = gr.Button("Generate Image with Adapter")
|
| 116 |
-
|
| 117 |
-
with gr.Row():
|
| 118 |
-
with gr.Accordion(label="Inpainting"):
|
| 119 |
-
inpaint_prompt_input = gr.Textbox(label="Prompt for Inpainting", placeholder="a castle on top of a mountain, highly detailed, 8k")
|
| 120 |
-
init_image_input = gr.File(label="Initial Image")
|
| 121 |
-
mask_image_input = gr.File(label="Mask Image")
|
| 122 |
-
inpaint_steps_input = gr.Slider(minimum=1, maximum=10, label="Inference Steps", value=4)
|
| 123 |
-
inpaint_guidance_input = gr.Slider(minimum=0, maximum=2, label="Guidance Scale", value=1)
|
| 124 |
-
inpaint_button = gr.Button("Inpaint Image")
|
| 125 |
-
|
| 126 |
-
with gr.Row():
|
| 127 |
-
with gr.Accordion(label="Image Modification (Experimental)"):
|
| 128 |
-
brightness_slider = gr.Slider(minimum=0.5, maximum=1.5, step=1, label="Brightness")
|
| 129 |
-
contrast_slider = gr.Slider(minimum=0.5, maximum=1.5, step=1, label="Contrast")
|
| 130 |
-
modify_button = gr.Button("Modify Image")
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
generate_button.click(
|
| 135 |
-
generate_image,
|
| 136 |
-
inputs=[prompt_input, steps_input, guidance_input],
|
| 137 |
-
outputs=image_output
|
| 138 |
-
)
|
| 139 |
-
|
| 140 |
-
modify_button.click(
|
| 141 |
-
modify_image,
|
| 142 |
-
inputs=[image_output, brightness_slider, contrast_slider],
|
| 143 |
-
outputs=image_output
|
| 144 |
)
|
-    ...
+
def predict(message, history):
|
| 36 |
+
history_transformer_format = history + [[message, ""]]
|
| 37 |
+
stop = StopOnTokens()
|
| 38 |
+
|
| 39 |
+
# Formatting the input for the model.
|
| 40 |
+
system_prompt = "<|im_start|>system\nYou are Solar, a helpful AI assistant.<|im_end|>"
|
| 41 |
+
messages = system_prompt + "".join(["".join(["\n<|im_start|>user\n" + item[0], "<|im_end|>\n<|im_start|>assistant\n" + item[1]]) for item in history_transformer_format])
|
| 42 |
+
input_ids = tokenizer([messages], return_tensors="pt").to('cuda')
|
| 43 |
+
streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
|
| 44 |
+
generate_kwargs = dict(
|
| 45 |
+
input_ids,
|
| 46 |
+
streamer=streamer,
|
| 47 |
+
max_new_tokens=1024,
|
| 48 |
+
do_sample=True,
|
| 49 |
+
top_p=0.95,
|
| 50 |
+
top_k=50,
|
| 51 |
+
temperature=0.7,
|
| 52 |
+
num_beams=1,
|
| 53 |
+
stopping_criteria=StoppingCriteriaList([stop])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 54 |
)
|
| 55 |
+
t = Thread(target=model.generate, kwargs=generate_kwargs)
|
| 56 |
+
t.start() # Starting the generation in a separate thread.
|
| 57 |
+
partial_message = ""
|
| 58 |
+
for new_token in streamer:
|
| 59 |
+
partial_message += new_token
|
| 60 |
+
if '<|im_end|>' in partial_message: # Breaking the loop if the stop token is generated.
|
| 61 |
+
break
|
| 62 |
+
yield partial_message
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
# Setting up the Gradio chat interface.
|
| 66 |
+
gr.ChatInterface(predict,
|
| 67 |
+
description="""
|
| 68 |
+
<center><img src="https://huggingface.co/macadeliccc/SOLAR-math-2x10.7b-v0.2/resolve/main/solar.png" width="33%"></center>\n\n
|
| 69 |
+
Chat with [macadeliccc/SOLAR-math-2x10.7b-v0.2](https://huggingface.co/macadeliccc/SOLAR-math-2x10.7b-v0.2), the first Mixture of Experts made by merging two fine-tuned [upstage/SOLAR-10.7B-v1.0](https://huggingface.co/upstage/SOLAR-10.7B-v1.0) models.
|
| 70 |
+
This large model (19.2B param) is good for various tasks, such as programming, dialogues, story writing, and more.\n\n
|
| 71 |
+
❤️ If you like this work, please follow me on [Hugging Face](https://huggingface.co/macadeliccc) and [LinkedIn](https://www.linkedin.com/in/tim-dolan-python-dev/).
|
| 72 |
+
""",
|
| 73 |
+
examples=[
|
| 74 |
+
'Can you solve the equation 2x + 3 = 11 for x?',
|
| 75 |
+
'How does Fermats last theorem impact number theory?',
|
| 76 |
+
'What is a vector in the scope of computer science rather than physics?',
|
| 77 |
+
'Use a list comprehension to create a list of squares for numbers from 1 to 10.',
|
| 78 |
+
'Recommend some popular science fiction books.',
|
| 79 |
+
'Can you write a short story about a time-traveling detective?'
|
| 80 |
+
],
|
| 81 |
+
theme=gr.themes.Soft(primary_hue="orange"),
|
| 82 |
+
).launch()
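
For reference, the prompt that `predict` assembles is plain ChatML-style text built by string concatenation rather than a tokenizer chat template. Below is a minimal sketch (not part of the commit; the example turns are invented) of what the model actually receives for a short history:

```python
# Sketch of the ChatML-style prompt predict() builds; the turns are made up.
history = [["What is 2 + 2?", "4"]]  # one completed user/assistant exchange
message = "Now double it."           # the incoming user message

system_prompt = "<|im_start|>system\nYou are Solar, a helpful AI assistant.<|im_end|>"
prompt = system_prompt + "".join(
    "\n<|im_start|>user\n" + user + "<|im_end|>\n<|im_start|>assistant\n" + assistant
    for user, assistant in history + [[message, ""]]
)
print(prompt)
# <|im_start|>system
# You are Solar, a helpful AI assistant.<|im_end|>
# <|im_start|>user
# What is 2 + 2?<|im_end|>
# <|im_start|>assistant
# 4
# <|im_start|>user
# Now double it.<|im_end|>
# <|im_start|>assistant
```

Note that the final assistant turn is left open, which is why the streaming loop in `predict` breaks as soon as the model emits `<|im_end|>`.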
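
Once the Space restarts with this app, the chat endpoint can also be exercised programmatically. A minimal sketch with `gradio_client`, assuming a recent client version, that `gr.ChatInterface` exposes its default `/chat` API route, and a placeholder Space id (the real id is not shown in this diff):

```python
# Minimal sketch, not part of the commit. The Space id below is a placeholder,
# and "/chat" is the gr.ChatInterface default route, not confirmed by the diff.
from gradio_client import Client

client = Client("macadeliccc/solar-chat")  # hypothetical Space id
reply = client.predict(
    "Can you solve the equation 2x + 3 = 11 for x?",  # user message
    api_name="/chat",
)
print(reply)
```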