<!doctype html>
<html lang="en">
<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <title>Gradio-Lite: Serverless Gradio Running Entirely in Your Browser</title>
  <meta name="description" content="Gradio-Lite: Serverless Gradio Running Entirely in Your Browser">

  <!-- Gradio-Lite runtime: runs the Gradio app entirely client-side via Pyodide (WebAssembly). -->
  <script type="module" crossorigin src="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.js"></script>
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.css">

  <style>
    /* Let the embedded Gradio app fill the whole viewport. */
    html, body {
      margin: 0;
      padding: 0;
      height: 100%;
    }
  </style>
</head>
<body>
  <!-- Everything inside <gradio-lite> is the serverless Gradio application. -->
  <gradio-lite>
    <gradio-file name="app.py" entrypoint>
| from transformers_js import import_transformers_js, as_url |
| import gradio as gr |
|
|
| transformers = await import_transformers_js() |
| pipeline = transformers.pipeline |
| pipe = await pipeline('object-detection', "Xenova/yolos-tiny") |
|
|
| async def detect(input_image): |
| result = await pipe(as_url(input_image)) |
| gradio_labels = [ |
| # List[Tuple[numpy.ndarray | Tuple[int, int, int, int], str]] |
| ( |
| ( |
| int(item["box"]["xmin"]), |
| int(item["box"]["ymin"]), |
| int(item["box"]["xmax"]), |
| int(item["box"]["ymax"]), |
| ), |
| item["label"], |
| ) |
| for item in result |
| ] |
| annotated_image_data = input_image, gradio_labels |
| return annotated_image_data, result |
|
|
| demo = gr.Interface( |
| detect, |
| gr.Image(type="filepath"), |
| [ |
| gr.AnnotatedImage(), |
| gr.JSON(), |
| ], |
| examples=[ |
| ["cats.jpg"] |
| ] |
| ) |
|
|
| demo.launch() |
| </gradio-file> |
|
|
| <gradio-file name="cats.jpg" url="https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/cats.jpg" /> |
|
|
| <gradio-requirements> |
| transformers_js_py |
| </gradio-requirements> |
| </gradio-lite> |
| </body> |
| </html> |