Spaces:
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -95,8 +95,8 @@ model3.eval()
|
|
| 95 |
# model2.load_state_dict(torch.load('model2.pth', map_location=torch.device('cpu')))
|
| 96 |
# model2.eval()
|
| 97 |
|
| 98 |
-
def predict(input_img):
|
| 99 |
-
input_img = Image.open(input_img
|
| 100 |
transform = transforms.Compose([transforms.Resize(256, Image.BICUBIC), transforms.ToTensor()])
|
| 101 |
input_img = transform(input_img)
|
| 102 |
input_img = torch.unsqueeze(input_img, 0)
|
|
@@ -123,7 +123,7 @@ title="Image to Coloring Page Generator"
|
|
| 123 |
|
| 124 |
|
| 125 |
iface = gr.Interface(predict, [gr.inputs.Image(type='filepath'),
|
| 126 |
-
gr.inputs.Radio(['Complex Lines'], type="value", default='Complex Lines', label='version')],
|
| 127 |
gr.outputs.Image(type="pil"))
|
| 128 |
|
| 129 |
iface.launch()
|
|
|
|
| 95 |
# model2.load_state_dict(torch.load('model2.pth', map_location=torch.device('cpu')))
|
| 96 |
# model2.eval()
|
| 97 |
|
| 98 |
+
def predict(input_img, ver):
|
| 99 |
+
input_img = Image.open(input_img)
|
| 100 |
transform = transforms.Compose([transforms.Resize(256, Image.BICUBIC), transforms.ToTensor()])
|
| 101 |
input_img = transform(input_img)
|
| 102 |
input_img = torch.unsqueeze(input_img, 0)
|
|
|
|
| 123 |
|
| 124 |
|
| 125 |
# Wire up the Gradio UI: a filepath-based image upload plus a radio
# selector for the line-style version, both fed into predict(); the
# result is rendered back to the user as a PIL image.
image_input = gr.inputs.Image(type='filepath')
version_input = gr.inputs.Radio(['Complex Lines','Simple Lines'],
                                type="value",
                                default='Complex Lines',
                                label='version')

iface = gr.Interface(predict,
                     [image_input, version_input],
                     gr.outputs.Image(type="pil"))

iface.launch()
|