Exceltic committed on
Commit
cb9c997
·
verified ·
1 Parent(s): bb2a31f

Fix app.py 3

Browse files
Files changed (1) hide show
  1. app.py +17 -25
app.py CHANGED
@@ -1,5 +1,4 @@
1
  import torch
2
- import onnx
3
  import onnxruntime as rt
4
  from torchvision import transforms as T
5
  from PIL import Image
@@ -15,26 +14,20 @@ charset = r"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%
15
  tokenizer_base = Tokenizer(charset)
16
 
17
  def get_transform(img_size):
18
- transforms = [
19
  T.Resize(img_size, T.InterpolationMode.BICUBIC),
20
  T.ToTensor(),
21
  T.Normalize(0.5, 0.5)
22
- ]
23
- return T.Compose(transforms)
24
 
25
- def to_numpy(tensor):
26
- return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
27
 
28
- def initialize_model(model_file):
29
- transform = get_transform(img_size)
30
- ort_session = rt.InferenceSession(model_file)
31
- return transform, ort_session
32
-
33
- def get_text(img_org):
34
  try:
35
- if img_org is None: return ""
36
- x = transform(img_org.convert('RGB')).unsqueeze(0)
37
- ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}
38
  logits = ort_session.run(None, ort_inputs)[0]
39
  probs = torch.tensor(logits).softmax(-1)
40
  preds, _ = tokenizer_base.decode(probs)
@@ -42,15 +35,14 @@ def get_text(img_org):
42
  except Exception as e:
43
  return str(e)
44
 
45
- transform, ort_session = initialize_model(model_file=model_file)
46
-
47
- # INTERFAZ MÍNIMA (Sin ejemplos para evitar el bug del log)
48
- demo = gr.Interface(
49
- fn=get_text,
50
- inputs=gr.Image(type="pil"),
51
- outputs=gr.Textbox(),
52
- title="Captcha Reader API"
53
- )
54
 
55
  if __name__ == "__main__":
56
- demo.launch(server_name="0.0.0.0", server_port=7860, show_api=False)
 
1
  import torch
 
2
  import onnxruntime as rt
3
  from torchvision import transforms as T
4
  from PIL import Image
 
14
  tokenizer_base = Tokenizer(charset)
15
 
16
def get_transform(img_size):
    """Build the image-preprocessing pipeline used before ONNX inference.

    Resizes the input to *img_size* with bicubic interpolation, converts
    it to a tensor, and normalizes channels to the [-1, 1] range
    (mean 0.5, std 0.5).
    """
    steps = [
        T.Resize(img_size, T.InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(0.5, 0.5),
    ]
    return T.Compose(steps)
 
22
 
23
# Module-level singletons built once at import time so every request
# reuses the same preprocessing pipeline and ONNX Runtime session.
# NOTE(review): img_size and model_file are defined earlier in the file,
# outside this view — confirm they are set before this point.
transform, ort_session = get_transform(img_size), rt.InferenceSession(model_file)
25
 
26
+ def predict(img):
 
 
 
 
 
27
  try:
28
+ if img is None: return "Sube una imagen"
29
+ x = transform(img.convert('RGB')).unsqueeze(0)
30
+ ort_inputs = {ort_session.get_inputs()[0].name: x.detach().cpu().numpy()}
31
  logits = ort_session.run(None, ort_inputs)[0]
32
  probs = torch.tensor(logits).softmax(-1)
33
  preds, _ = tokenizer_base.decode(probs)
 
35
  except Exception as e:
36
  return str(e)
37
 
38
# Build the UI with gr.Blocks rather than gr.Interface to sidestep the
# broken auto-generated API schema (per the commit message).
with gr.Blocks() as demo:
    gr.Markdown("# Captcha Solver")
    with gr.Row():
        image_input = gr.Image(type="pil")
        text_output = gr.Textbox()
    solve_button = gr.Button("Resolver")
    # Wire the button to the inference function defined above.
    solve_button.click(fn=predict, inputs=image_input, outputs=text_output)
 
46
 
47
# Script entry point: bind to all interfaces on the standard
# Hugging Face Spaces port (7860).
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)