from huggingface_hub import InferenceClient
from PIL import Image,ImageFont,ImageDraw
import gradio as gr
import requests
import random
import uuid
import io
from utils import models, MEME_GENERATOR,GENERATE_PROMPT
# Text-generation backend used by both `generate` and `run_gpt`.
client = InferenceClient(
    "mistralai/Mixtral-8x7B-Instruct-v0.1"
)
# Pre-load every diffusion model listed in utils.models; the UI dropdown
# selects into this list by position (Dropdown uses type="index").
# The original loop used enumerate but never used the index.
loaded_model = [gr.load(f'models/{model}', cache_examples=False) for model in models]
def textover(im, text, x=0, y=0, fill=(0, 0, 0), font_size=20,
             font_path="./fonts/unifont-15.0.01.ttf"):
    """Draw `text` onto image `im` in place and return it.

    Args:
        im: PIL image to draw on (mutated in place).
        text: caption string to render.
        x, y: top-left position of the text (default 0, 0).
        fill: RGB text color (default black).
        font_size: point size passed to ImageFont.truetype.
        font_path: path to a TTF font file.

    The position, color, size and font were previously hard-coded; they are
    now defaulted parameters, so existing two-argument calls are unchanged.
    """
    draw = ImageDraw.Draw(im)
    font = ImageFont.truetype(font_path, int(font_size))
    draw.text((x, y), text, font=font, fill=fill)
    return im
def get_concat_h_cut(in1, in2):
    """Open two image files and join them side by side.

    The canvas is as tall as the shorter of the two images; the second
    image is pasted immediately to the right of the first.
    """
    left = Image.open(in1)
    right = Image.open(in2)
    canvas_size = (left.width + right.width, min(left.height, right.height))
    canvas = Image.new('RGB', canvas_size)
    canvas.paste(left, (0, 0))
    canvas.paste(right, (left.width, 0))
    return canvas
def get_concat_v_cut(im1, theme='light'):
    """Return a copy of `im1` with a 200px solid strip added above it.

    Args:
        im1: PIL image to extend (the caption is later drawn on the strip).
        theme: 'dark' gives a gray-blue strip (31, 41, 55); 'light' or any
            unrecognized value gives white.

    Bug fix: the original used two independent `if` tests, so any theme
    other than 'dark'/'light' left `color` undefined and raised NameError.
    """
    if theme == 'dark':
        color = (31, 41, 55)
    else:
        # 'light' and any unknown theme fall back to white.
        color = (255, 255, 255)
    dst = Image.new('RGB', (im1.width, im1.height + 200), color=color)
    dst.paste(im1, (0, 200))
    return dst
def format_prompt(message, history):
    """Build a Mixtral-instruct prompt string from a chat history.

    Each (user, bot) pair becomes "[INST] user [/INST] bot</s> ", after a
    leading "<s>"; the new message is appended as a final open [INST] turn.
    """
    parts = ["<s>"]
    for user_turn, bot_turn in history:
        parts.append(f"[INST] {user_turn} [/INST] {bot_turn}</s> ")
    parts.append(f"[INST] {message} [/INST]")
    return "".join(parts)
# Names of the system-prompt "agents" this app knows about; only the meme
# generator prompt (utils.MEME_GENERATOR) is actually used by `generate`.
agents =[
    "MEME_GENERATOR",
]
def generate(prompt, history):
    """Stream a meme spec from Mixtral, yielding chat and parsed JSON.

    Yields (chat_pairs, spec) tuples for the Chatbot and JSON components.
    `spec` stays {} until the model has emitted both "PROMPT:" and
    "MEME_TEXT:" markers, then becomes {'PROMPT': ..., 'MEME_TEXT': ...}
    for the `run` handler to consume.
    """
    print(f'HISTORY:: {history}')
    history = []  # history is intentionally discarded; each call starts fresh
    output1 = {}
    seed = random.randint(1, 1111111111111111)
    system_prompt = MEME_GENERATOR
    generate_kwargs = dict(
        temperature=0.7,
        max_new_tokens=256,
        top_p=0.95,
        repetition_penalty=1,
        do_sample=True,
        seed=seed,
    )
    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        output += response.token.text
        yield [(prompt, output)], output1
    # BUG FIX: the original condition was `"PROMPT:" and "MEME_TEXT:" in output`,
    # which only tests the second marker ("PROMPT:" is a truthy literal), so a
    # response containing MEME_TEXT: but not PROMPT: crashed the split below.
    if "PROMPT:" in output and "MEME_TEXT:" in output:
        print("YES")
        prompt_t = output.split("PROMPT:", 1)[1].split("MEME_TEXT:", 1)[0].strip()
        print(prompt_t)
        meme_t = output.split("MEME_TEXT:", 1)[1].strip()
        print(meme_t)
        output1 = {'PROMPT': prompt_t, 'MEME_TEXT': meme_t}
    yield [(prompt, output)], output1
def run(inp, model_drop):
    """Turn the JSON spec from `generate` into a captioned meme image.

    inp: dict with 'PROMPT' (diffusion prompt) and 'MEME_TEXT' (caption).
    model_drop: integer index into `loaded_model` (the Dropdown uses
    type="index").
    Returns a PIL image with a 200px strip on top carrying the caption,
    or implicitly None when the HTTP fetch fails (see note below).
    """
    prompt=inp['PROMPT']
    text=inp['MEME_TEXT']
    # Select the pre-loaded gradio-hosted diffusion model by index.
    model=loaded_model[int(model_drop)]
    out_img=model(prompt)
    print(out_img)
    # NOTE(review): assumes the model call returns a server-side file path
    # reachable via this Space's /file= endpoint — confirm against gr.load.
    url=f'https://omnibus-meme-diffusion.hf.space/file={out_img}'
    print(url)
    uid = uuid.uuid4()  # NOTE(review): uid is computed but never used
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        out = Image.open(io.BytesIO(r.content))
        # Add the blank header strip, then draw the meme text onto it.
        out=get_concat_v_cut(out)
        out=textover(out,text)
        return out
    # NOTE(review): any non-200 response falls through and returns None,
    # which surfaces as an empty/errored Image component in the UI.
def run_gpt(in_prompt, history):
    """Run a single non-streamed Mixtral completion for `in_prompt`.

    Formats the prompt with `format_prompt`, prepends utils.GENERATE_PROMPT,
    and returns the accumulated generated text as one string.
    NOTE(review): this function is not wired to any UI event in this file.
    """
    # NOTE(review): `max_prompt` and `condense` are not defined or imported
    # anywhere in this file — this branch raises NameError whenever it runs.
    # They presumably belong in utils; verify and import them, or drop this.
    if len(in_prompt)>max_prompt:
        in_prompt = condense(in_prompt)
    print(f'history :: {history}')
    prompt=format_prompt(in_prompt,history)
    seed = random.randint(1,1111111111111111)
    print (seed)
    generate_kwargs = dict(
        temperature=1.0,
        max_new_tokens=1048,
        top_p=0.99,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )
    content = GENERATE_PROMPT + prompt
    print(content)
    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
    resp = ""
    # Drain the token stream into a single response string.
    for response in stream:
        resp += response.token.text
    return resp
# --- Gradio UI wiring ---
with gr.Blocks() as app:
    gr.HTML("""<center><h1>Meme Diffusion</h1></center>""")
    with gr.Row():
        with gr.Column(scale=1):
            chatbot=gr.Chatbot()
            msg = gr.Textbox()
            # type="index" makes handlers receive the selected model's
            # position, which `run` uses to index `loaded_model`.
            model_drop=gr.Dropdown(label="Diffusion Models", type="index", choices=[m for m in models], value=models[0])
            with gr.Group():
                submit_b = gr.Button("Meme")
                submit_im = gr.Button("Image")
                with gr.Row():
                    stop_b = gr.Button("Stop")
                    clear = gr.ClearButton([msg, chatbot])
        with gr.Column(scale=2):
            im_out=gr.Image(label="Image")
            json_out=gr.JSON()
    # "Meme": stream chat from Mixtral into the chatbot and the JSON spec.
    sub_b = submit_b.click(generate, [msg,chatbot],[chatbot,json_out])
    # "Image": render the JSON spec into a captioned image.
    sub_im = submit_im.click(run, [json_out,model_drop],[im_out])
    # "Stop" cancels both in-flight event jobs.
    stop_b.click(None,None,None, cancels=[sub_b,sub_im])
app.launch()