Spaces:
Runtime error
Runtime error
import sys

import huggingface_hub

# Compatibility shim: recent huggingface_hub releases removed HfFolder.
# Install a stub so downstream code calling HfFolder.get_token() keeps working.
if not hasattr(huggingface_hub, "HfFolder"):
    class MockHfFolder:
        """Minimal stand-in for the removed huggingface_hub.HfFolder."""

        @staticmethod
        def get_token():
            # @staticmethod so the call works both on the class and on an
            # instance; the original bare `def get_token():` raised TypeError
            # when invoked as MockHfFolder().get_token().
            return None

    huggingface_hub.HfFolder = MockHfFolder
| import torch | |
| import onnxruntime as rt | |
| from torchvision import transforms as T | |
| from PIL import Image | |
| from tokenizer_base import Tokenizer | |
| import pathlib | |
| import os | |
| import gradio as gr | |
# Path configuration
# Directory containing this script; model weights live next to it.
cwd = pathlib.Path(__file__).parent.resolve()
model_file = os.path.join(cwd, "secret_models", "captcha.onnx")
# (height, width) the model expects — TODO confirm against the ONNX input shape.
img_size = (32, 128)
# Full printable-ASCII vocabulary used to train the recognizer.
charset = r"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"
tokenizer_base = Tokenizer(charset)
def get_transform(img_size):
    """Build the image preprocessing pipeline for the given (H, W) size.

    Resizes with bicubic interpolation, converts to a tensor, then
    normalizes channel values from [0, 1] to [-1, 1] (mean 0.5, std 0.5).
    """
    steps = [
        T.Resize(img_size, T.InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(0.5, 0.5),
    ]
    return T.Compose(steps)
# Module-level singletons, built once at import time.
transform = get_transform(img_size)
# CPU ONNX Runtime session over the captcha model; raises here if the file is missing.
ort_session = rt.InferenceSession(model_file)
def predict(img):
    """Decode the captcha text from a PIL image via the ONNX model.

    Never raises: any failure is reported back as an "Error: ..." string so
    the Gradio textbox always receives something displayable.
    """
    try:
        if img is None:
            return "Error: No hay imagen"
        # Preprocess to a single-image batch: (1, C, H, W).
        batch = transform(img.convert('RGB')).unsqueeze(0)
        input_name = ort_session.get_inputs()[0].name
        feed = {input_name: batch.detach().cpu().numpy()}
        logits = ort_session.run(None, feed)[0]
        # Back to torch only for softmax + tokenizer decoding.
        probabilities = torch.tensor(logits).softmax(-1)
        decoded, _ = tokenizer_base.decode(probabilities)
        return decoded[0]
    except Exception as e:
        return f"Error: {str(e)}"
# Simplified Blocks-based interface.
with gr.Blocks() as demo:
    gr.Markdown("### API Captcha Solver")
    captcha_image = gr.Image(type="pil")
    result_box = gr.Textbox()
    solve_button = gr.Button("Resolver")
    solve_button.click(fn=predict, inputs=captcha_image, outputs=result_box)
if __name__ == "__main__":
    # Bind to all interfaces on 7860 (the standard HF Spaces port);
    # show_api=False hides the auto-generated API docs page.
    demo.launch(server_name="0.0.0.0", server_port=7860, show_api=False)