Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -13,6 +13,38 @@ loaded_model=[]
|
|
| 13 |
# Instantiate a Gradio interface for every configured model id and keep
# them in loaded_model (index-aligned with `models`) for later dispatch.
for model_name in models:
    loaded_model.append(gr.load(f'models/{model_name}', cache_examples=False))
|
| 15 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
def format_prompt(message, history):
|
| 17 |
prompt = "<s>"
|
| 18 |
for user_prompt, bot_response in history:
|
|
@@ -69,6 +101,7 @@ def run(inp,model_drop):
|
|
| 69 |
r = requests.get(url, stream=True)
|
| 70 |
if r.status_code == 200:
|
| 71 |
out = Image.open(io.BytesIO(r.content))
|
|
|
|
| 72 |
return out
|
| 73 |
|
| 74 |
def run_gpt(in_prompt,history,):
|
|
|
|
| 13 |
for i,model in enumerate(models):
|
| 14 |
loaded_model.append(gr.load(f'models/{model}', cache_examples=False))
|
| 15 |
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def get_concat_h_cut(in1, in2):
    """Concatenate two images side by side, cut to the shorter height.

    Parameters
    ----------
    in1, in2 : anything ``PIL.Image.open`` accepts (path or file-like)
        Left and right source images.

    Returns
    -------
    PIL.Image.Image
        A new RGB image whose width is the sum of both widths and whose
        height is ``min`` of the two heights, with *in1* pasted at the
        left edge and *in2* immediately to its right.
    """
    left = Image.open(in1)
    right = Image.open(in2)
    # Canvas is wide enough for both; the taller image is cropped at the bottom.
    canvas_size = (left.width + right.width, min(left.height, right.height))
    combined = Image.new('RGB', canvas_size)
    combined.paste(left, (0, 0))
    combined.paste(right, (left.width, 0))
    return combined
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def get_concat_v_cut(in1, theme='light'):
    """Return *in1* on a background canvas with a 100px strip added below.

    Parameters
    ----------
    in1 : PIL.Image.Image, path, or file-like
        Source image. The caller in ``run()`` passes an already-open
        ``Image``, so both open images and openable inputs are accepted.
    theme : str, default 'light'
        'dark' -> gray-blue (31, 41, 55) background strip;
        anything else -> white (255, 255, 255).

    Returns
    -------
    PIL.Image.Image
        New RGB image: same width as *in1*, 100px taller, *in1* pasted
        at the top-left.
    """
    # Original called Image.open(in1) unconditionally, which raises when the
    # caller hands in an already-open Image (as run() does); accept both.
    im1 = in1 if isinstance(in1, Image.Image) else Image.open(in1)
    if theme == 'dark':
        color = (31, 41, 55)
    else:
        # 'light' and any unrecognized theme fall back to white, so `color`
        # is always bound (the original left it undefined for other values).
        color = (255, 255, 255)
    # Original referenced an undefined `im2` (copy-paste residue from the
    # horizontal-concat helper) and crashed with NameError; padding a single
    # image only needs im1's dimensions.
    dst = Image.new('RGB', (im1.width, im1.height + 100), color=color)
    dst.paste(im1, (0, 0))
    return dst
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
|
| 48 |
def format_prompt(message, history):
|
| 49 |
prompt = "<s>"
|
| 50 |
for user_prompt, bot_response in history:
|
|
|
|
| 101 |
r = requests.get(url, stream=True)
|
| 102 |
if r.status_code == 200:
|
| 103 |
out = Image.open(io.BytesIO(r.content))
|
| 104 |
+
out=get_concat_v_cut(out)
|
| 105 |
return out
|
| 106 |
|
| 107 |
def run_gpt(in_prompt,history,):
|