Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,4 +1,4 @@
|
|
| 1 |
-
import
|
| 2 |
from PIL import Image
|
| 3 |
import cv2 as cv
|
| 4 |
|
|
@@ -303,8 +303,10 @@ def visualize(model, data, dims):
|
|
| 303 |
# t_img = transforms.Resize((dims[0], dims[1]))(t_img)
|
| 304 |
img = Image.fromarray(np.uint8(fake_imgs[i]))
|
| 305 |
img = cv.resize(fake_imgs[i], dsize=(dims[1], dims[0]), interpolation=cv.INTER_CUBIC)
|
|
|
|
| 306 |
# st.text(f"Size of fake image {fake_imgs[i].shape} \n Type of image = {type(fake_imgs[i])}")
|
| 307 |
-
st.image(img, caption="Output image", use_column_width='auto', clamp=True)
|
|
|
|
| 308 |
|
| 309 |
def log_results(loss_meter_dict):
|
| 310 |
for loss_name, loss_meter in loss_meter_dict.items():
|
|
@@ -352,14 +354,26 @@ def make_dataloaders2(batch_size=16, n_workers=4, pin_memory=True, **kwargs): #
|
|
| 352 |
pin_memory=pin_memory)
|
| 353 |
return dataloader
|
| 354 |
|
| 355 |
-
|
| 356 |
-
|
| 357 |
-
|
| 358 |
-
st.text(body=f"Size of uploaded image {im.shape}")
|
| 359 |
a = im.shape
|
| 360 |
-
st.image(im, caption="Uploaded Image.", use_column_width='auto')
|
| 361 |
test_dl = make_dataloaders2(img_list=[im])
|
| 362 |
for data in test_dl:
|
| 363 |
model.setup_input(data)
|
| 364 |
model.optimize()
|
| 365 |
-
visualize(model, data, a)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
from PIL import Image
|
| 3 |
import cv2 as cv
|
| 4 |
|
|
|
|
| 303 |
# t_img = transforms.Resize((dims[0], dims[1]))(t_img)
|
| 304 |
img = Image.fromarray(np.uint8(fake_imgs[i]))
|
| 305 |
img = cv.resize(fake_imgs[i], dsize=(dims[1], dims[0]), interpolation=cv.INTER_CUBIC)
|
| 306 |
+
return img
|
| 307 |
# st.text(f"Size of fake image {fake_imgs[i].shape} \n Type of image = {type(fake_imgs[i])}")
|
| 308 |
+
# st.image(img, caption="Output image", use_column_width='auto', clamp=True)
|
| 309 |
+
|
| 310 |
|
| 311 |
def log_results(loss_meter_dict):
|
| 312 |
for loss_name, loss_meter in loss_meter_dict.items():
|
|
|
|
| 354 |
pin_memory=pin_memory)
|
| 355 |
return dataloader
|
| 356 |
|
| 357 |
+
def main_func(filepath):
    """Colorize the image stored at *filepath* and return (size_text, image).

    Gradio handler: loads the uploaded image, pushes it through the model's
    test dataloader, runs one optimize/visualize pass, and returns a tuple of
    (description of the input size, colorized output image) matching the
    Interface's [Textbox, image] outputs.
    """
    import numpy as np  # np is used elsewhere in this file; local import keeps this block self-contained

    # BUG FIX: Image.open() returns a PIL Image, which has no `.shape`
    # attribute (only `.size`), so the original line crashed at upload time.
    # Converting to a numpy array restores the (h, w, c) shape the rest of
    # the pipeline (cv.resize dims ordering in visualize) was written for.
    im = np.array(Image.open(filepath))
    size_text = f"Size of uploaded image {im.shape}"
    # st.text(body=f"Size of uploaded image {im.shape}")
    dims = im.shape  # (h, w[, c]) — visualize() indexes dims[0]/dims[1]
    # st.image(im, caption="Uploaded Image.", use_column_width='auto')
    # NOTE(review): assumes make_dataloaders2 accepts a numpy array in
    # img_list, as the old streamlit path did — confirm against its definition.
    test_dl = make_dataloaders2(img_list=[im])
    for data in test_dl:
        model.setup_input(data)
        model.optimize()
        img = visualize(model, data, dims)
    return (size_text, img)
|
| 369 |
+
|
| 370 |
+
title = "PicSum"
description = "Gradio demo for PicSum project. You can give an image as input on the left side and then click on the submit button. The generated text, summary, important sentences and fill in the gaps would be generated on the right side."
# BUG FIX: the Interface previously received `extract` (undefined in this
# file — the handler defined above is `main_func`) and a CheckboxGroup built
# from an undefined `choices` list, while `main_func(filepath)` only accepts
# a single input.  Both NameErrors crashed the Space at startup ("Runtime
# error").  Wire the real handler with one image input and the two outputs
# it returns: (size text, colorized image).
gr.Interface(
    main_func,
    gr.inputs.Image(type="filepath", label="Input"),
    [gr.outputs.Textbox(label="Generated Text"), "image"],
    title=title,
    description=description,
    # examples=[['a.png'], ['b.png']]  # re-enable once example files are committed
).launch(enable_queue=True)
|