Spaces:
Runtime error
Runtime error
Fix app.py 3
Browse files
app.py
CHANGED
|
@@ -1,5 +1,4 @@
|
|
| 1 |
import torch
|
| 2 |
-
import onnx
|
| 3 |
import onnxruntime as rt
|
| 4 |
from torchvision import transforms as T
|
| 5 |
from PIL import Image
|
|
@@ -15,26 +14,20 @@ charset = r"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%
|
|
| 15 |
tokenizer_base = Tokenizer(charset)
|
| 16 |
|
| 17 |
def get_transform(img_size):
|
| 18 |
-
|
| 19 |
T.Resize(img_size, T.InterpolationMode.BICUBIC),
|
| 20 |
T.ToTensor(),
|
| 21 |
T.Normalize(0.5, 0.5)
|
| 22 |
-
]
|
| 23 |
-
return T.Compose(transforms)
|
| 24 |
|
| 25 |
-
|
| 26 |
-
|
| 27 |
|
| 28 |
-
def
|
| 29 |
-
transform = get_transform(img_size)
|
| 30 |
-
ort_session = rt.InferenceSession(model_file)
|
| 31 |
-
return transform, ort_session
|
| 32 |
-
|
| 33 |
-
def get_text(img_org):
|
| 34 |
try:
|
| 35 |
-
if
|
| 36 |
-
x = transform(
|
| 37 |
-
ort_inputs = {ort_session.get_inputs()[0].name:
|
| 38 |
logits = ort_session.run(None, ort_inputs)[0]
|
| 39 |
probs = torch.tensor(logits).softmax(-1)
|
| 40 |
preds, _ = tokenizer_base.decode(probs)
|
|
@@ -42,15 +35,14 @@ def get_text(img_org):
|
|
| 42 |
except Exception as e:
|
| 43 |
return str(e)
|
| 44 |
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
#
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
)
|
| 54 |
|
| 55 |
if __name__ == "__main__":
|
| 56 |
-
demo.launch(server_name="0.0.0.0", server_port=7860
|
|
|
|
| 1 |
import torch
|
|
|
|
| 2 |
import onnxruntime as rt
|
| 3 |
from torchvision import transforms as T
|
| 4 |
from PIL import Image
|
|
|
|
| 14 |
tokenizer_base = Tokenizer(charset)
|
| 15 |
|
| 16 |
def get_transform(img_size):
    """Build the image preprocessing pipeline for the OCR model.

    Resizes to ``img_size`` with bicubic interpolation, converts to a
    tensor, and normalizes channels to roughly [-1, 1] (mean 0.5, std 0.5).
    """
    steps = [
        T.Resize(img_size, T.InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(0.5, 0.5),
    ]
    return T.Compose(steps)
|
|
|
|
| 22 |
|
| 23 |
+
# Build the preprocessing pipeline and the ONNX Runtime session once, at
# import time, so every request reuses them.
# NOTE(review): `img_size` and `model_file` are defined earlier in the file
# (not visible in this chunk) — presumably the model's expected input size
# and the path to the exported ONNX graph; confirm against the full file.
transform = get_transform(img_size)
ort_session = rt.InferenceSession(model_file)
|
| 25 |
|
| 26 |
+
def predict(img):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
try:
|
| 28 |
+
if img is None: return "Sube una imagen"
|
| 29 |
+
x = transform(img.convert('RGB')).unsqueeze(0)
|
| 30 |
+
ort_inputs = {ort_session.get_inputs()[0].name: x.detach().cpu().numpy()}
|
| 31 |
logits = ort_session.run(None, ort_inputs)[0]
|
| 32 |
probs = torch.tensor(logits).softmax(-1)
|
| 33 |
preds, _ = tokenizer_base.decode(probs)
|
|
|
|
| 35 |
except Exception as e:
|
| 36 |
return str(e)
|
| 37 |
|
| 38 |
+
# Use gr.Blocks instead of gr.Interface to avoid the auto-generated API
# endpoint machinery, which is broken in this Space (translated from the
# original Spanish comment).
with gr.Blocks() as demo:
    gr.Markdown("# Captcha Solver")
    with gr.Row():
        input_img = gr.Image(type="pil")  # captcha image uploaded by the user
        output_text = gr.Textbox()        # decoded text, or the error message
    btn = gr.Button("Resolver")
    # Wire the button to the ONNX-backed predict() defined earlier in the file.
    btn.click(fn=predict, inputs=input_img, outputs=output_text)
|
|
|
|
| 46 |
|
| 47 |
if __name__ == "__main__":
    # Bind to all interfaces on port 7860 — the standard host/port contract
    # for a Hugging Face Spaces container.
    demo.launch(server_name="0.0.0.0", server_port=7860)
|