Spaces:
Running
Running
File size: 4,266 Bytes
111a99e 2fcfad9 111a99e 9f9c33b 2fcfad9 111a99e b38e046 111a99e b38e046 9f9c33b 2fcfad9 111ff5f d5a7e96 2fcfad9 111ff5f 2fcfad9 111a99e 111ff5f 111a99e 9f9c33b 111a99e 9f9c33b 111a99e b77caf3 111a99e b77caf3 111a99e b77caf3 9f9c33b a608f20 111ff5f 2fcfad9 111ff5f 2fcfad9 111ff5f 2fcfad9 111ff5f 2fcfad9 111ff5f d5a7e96 111ff5f d5a7e96 b77caf3 111ff5f 2fcfad9 111ff5f 2fcfad9 111ff5f d5a7e96 111ff5f d5a7e96 b77caf3 111ff5f d5a7e96 b77caf3 d5a7e96 111ff5f d5a7e96 111ff5f 2fcfad9 111ff5f 9f9c33b 111a99e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 |
#!/usr/bin/env python3
import os
import json
import base64
import requests
import gradio as gr
from PIL import Image
from io import BytesIO
# Inference-endpoint configuration, injected via environment variables
# (e.g. HF Space secrets). Both are required, so fail fast at import time.
ENDPOINT = os.environ.get("VLLM_ENDPOINT")  # OpenAI-compatible chat-completions URL
MODEL = os.environ.get("VLLM_MODEL")  # model name sent in every request payload
if not ENDPOINT or not MODEL:
    raise ValueError("VLLM_ENDPOINT and VLLM_MODEL environment variables must be set. Please add them as secrets in your Space settings.")
def image_to_base64(image):
    """Serialize a PIL image to a base64-encoded PNG string (UTF-8 text)."""
    with BytesIO() as png_buffer:
        image.save(png_buffer, format="PNG")
        encoded = base64.b64encode(png_buffer.getvalue())
    return encoded.decode("utf-8")
def process_image(image, temperature):
    """Stream OCR text extracted from *image* by the vLLM endpoint.

    Generator: yields ``(rendered_markdown, raw_text)`` tuples, growing as
    tokens stream in, so both Gradio outputs update incrementally.

    Args:
        image: PIL image to transcribe, or None (yields a prompt message).
        temperature: sampling temperature forwarded to the model.
    """
    if image is None:
        yield "Please upload an image first.", ""
        return
    b64_image = image_to_base64(image)
    payload = {
        "model": MODEL,
        "messages": [
            {
                "role": "user",
                "content": [
                    # NOTE(review): the text prompt is empty — the model sees
                    # only the image. Confirm this is intentional; most OCR
                    # setups pass an explicit instruction here.
                    {"type": "text", "text": ""},
                    {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64_image}"}}
                ]
            }
        ],
        "temperature": temperature,
        "stream": True
    }
    try:
        # Fix: the original call had no timeout, so a stalled endpoint would
        # hang the UI forever. The `with` block also guarantees the streaming
        # connection is released even if the generator is abandoned early.
        with requests.post(
            ENDPOINT,
            headers={"Content-Type": "application/json"},
            data=json.dumps(payload),
            stream=True,
            timeout=(10, 300)  # (connect, read) seconds
        ) as response:
            response.raise_for_status()
            accumulated_response = ""
            # Parse the OpenAI-style SSE stream: lines of `data: {json}`
            # terminated by a `data: [DONE]` sentinel.
            for line in response.iter_lines():
                if not line:
                    continue
                line = line.decode('utf-8')
                if line.startswith('data: '):
                    line = line[6:]
                if line.strip() == '[DONE]':
                    break
                try:
                    chunk = json.loads(line)
                except json.JSONDecodeError:
                    # Skip keep-alives / non-JSON noise in the stream.
                    continue
                choices = chunk.get('choices') or []
                if choices:
                    content = choices[0].get('delta', {}).get('content', '')
                    if content:
                        accumulated_response += content
                        yield accumulated_response, accumulated_response
    except Exception as e:
        # UI boundary: surface any failure (network, HTTP, decode) in both
        # output panes instead of crashing the Gradio worker.
        error_msg = f"Error: {str(e)}"
        yield error_msg, error_msg
# --- Gradio UI definition --------------------------------------------------
# `demo` is the top-level Blocks app; it is launched by the __main__ guard.
with gr.Blocks(title="π Image OCR", theme=gr.themes.Soft()) as demo:
    # Header / usage instructions shown above the controls.
    gr.Markdown(
        """
# π Image to Text Extraction
**π‘ How to use:**
1. Upload an image using the upload box
2. Adjust temperature if needed
3. Click "Extract Text" to process
The model will extract and format text from your image.
"""
    )
    with gr.Row():
        with gr.Column():
            # Input side: image, sampling temperature, and action buttons.
            image_input = gr.Image(
                type="pil",  # hands PIL.Image objects to process_image()
                label="πΌοΈ Upload Image",
                sources=["upload", "clipboard"],
                height=600
            )
            temperature = gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.15,  # low default — OCR favours near-deterministic output
                step=0.05,
                label="Temperature"
            )
            submit_btn = gr.Button("Extract Text", variant="primary")
            clear_btn = gr.Button("Clear", variant="secondary")
        with gr.Column():
            # Rendered (markdown/HTML) view of the extracted text.
            output_text = gr.Markdown(
                label="π Extracted Text (Rendered)",
                value="<div style='min-height: 600px; padding: 10px; border: 1px solid #e0e0e0; border-radius: 4px; background-color: #f9f9f9;'><em>Extracted text will appear here...</em></div>",
                height=600
            )
    with gr.Row():
        with gr.Column():
            # Plain-text copy of the same output, with a copy-to-clipboard button.
            raw_output = gr.Textbox(
                label="Raw Markdown Output",
                placeholder="Raw text will appear here...",
                lines=30,
                show_copy_button=True
            )
    # process_image is a generator, so both outputs stream token-by-token.
    submit_btn.click(
        fn=process_image,
        inputs=[image_input, temperature],
        outputs=[output_text, raw_output]
    )
    # NOTE(review): clearing resets output_text to "" rather than the styled
    # placeholder div it starts with — confirm that is acceptable.
    clear_btn.click(
        fn=lambda: (None, "", ""),
        outputs=[image_input, output_text, raw_output]
    )
if __name__ == "__main__":
    # Launch the Gradio app when run as a script. (Fix: removed the stray
    # trailing "|" character, which would be a syntax error.)
    demo.launch()