# miniCPM / app.py — Gradio Space entry point for eFax PDF extraction
# Author: Suvadeep Das
import base64
import functools
import io
import os
import tempfile

import gradio as gr
import torch
from huggingface_hub import login
from pdf2image import convert_from_bytes
from PIL import Image
from transformers import AutoModel, AutoTokenizer
# Authenticate against the Hugging Face Hub when a token is configured.
# Add HUGGING_FACE_HUB_TOKEN to the Space secrets to access gated models.
HF_TOKEN = os.environ.get("HUGGING_FACE_HUB_TOKEN")
if HF_TOKEN:
    login(token=HF_TOKEN)
# Load MiniCPM model.
# BUG FIX: the original used @gr.cache, which does not exist in the Gradio
# API and raises AttributeError at import time. functools.lru_cache(maxsize=1)
# gives the intended "load once, reuse on later calls" behaviour.
@functools.lru_cache(maxsize=1)
def load_model():
    """Load the MiniCPM vision-language model and tokenizer on CPU.

    Tries the gated openbmb/MiniCPM-V-2_6 checkpoint first and falls back
    to the older, non-gated openbmb/MiniCPM-V-2 on any failure.

    Returns:
        tuple: ``(model, tokenizer)`` ready for inference.

    Raises:
        Exception: propagated when the fallback checkpoint also fails to load.
    """
    try:
        tokenizer = AutoTokenizer.from_pretrained(
            "openbmb/MiniCPM-V-2_6",
            trust_remote_code=True,
        )
        model = AutoModel.from_pretrained(
            "openbmb/MiniCPM-V-2_6",
            trust_remote_code=True,
            torch_dtype=torch.float16,
            device_map="cpu",  # Use CPU for free tier (no GPU on free Spaces)
        )
        return model, tokenizer
    except Exception as e:
        # Fallback to non-gated version if access issues (e.g. missing or
        # unauthorized HF token for the gated repo).
        print(f"Error loading gated model: {e}")
        tokenizer = AutoTokenizer.from_pretrained(
            "openbmb/MiniCPM-V-2",
            trust_remote_code=True,
        )
        model = AutoModel.from_pretrained(
            "openbmb/MiniCPM-V-2",
            trust_remote_code=True,
            torch_dtype=torch.float16,
            device_map="cpu",
        )
        return model, tokenizer
# Initialize model
# Loaded once at import time; `model` and `tokenizer` are module-level
# globals reused by extract_data_from_image for every request.
model, tokenizer = load_model()
def pdf_to_images(pdf_file):
    """Render every page of a PDF as a PIL image.

    Args:
        pdf_file: a file-like object exposing ``read()`` or a filesystem path.

    Returns:
        list: one PIL image per page; an empty list when conversion fails.
    """
    try:
        reader = getattr(pdf_file, "read", None)
        if reader is not None:
            pdf_bytes = reader()
        else:
            with open(pdf_file, "rb") as handle:
                pdf_bytes = handle.read()
        # 300 DPI keeps small fax print legible for downstream OCR.
        return convert_from_bytes(pdf_bytes, dpi=300)
    except Exception as exc:
        print(f"Error converting PDF to images: {exc}")
        return []
def extract_data_from_image(image, extraction_prompt):
    """Run the extraction prompt against a single page image via MiniCPM.

    Args:
        image: PIL image of one document page.
        extraction_prompt: instruction text sent alongside the image.

    Returns:
        dict: ``{"status": "success", "extracted_data": ..., "model_used": ...}``
        on success, or ``{"status": "error", "error": ..., "extracted_data": None}``
        when inference raises.
    """
    # MiniCPM chat-format message: prompt text plus the page image.
    msgs = [{
        'role': 'user',
        'content': [
            {'type': 'text', 'text': extraction_prompt},
            {'type': 'image', 'image': image},
        ],
    }]
    try:
        # sampling with a very low temperature keeps output near-deterministic.
        answer = model.chat(
            image=image,
            msgs=msgs,
            tokenizer=tokenizer,
            sampling=True,
            temperature=0.1,
        )
    except Exception as exc:
        return {
            "status": "error",
            "error": str(exc),
            "extracted_data": None,
        }
    return {
        "status": "success",
        "extracted_data": answer,
        "model_used": "MiniCPM-V-2_6",
    }
# Default per-page instruction for the model, hoisted out of the signature
# for readability. BUG FIX: the original prompt told the model to report
# "model_used": "gpt-4o" even though this app runs MiniCPM, so every
# extraction carried misleading metadata; it now names MiniCPM-V-2_6.
DEFAULT_EXTRACTION_PROMPT = (
    "You are a deterministic medical data extraction engine. You will receive medical documents in various layouts. Your task is to extract specific fields into a strictly structured JSON format, including realistic confidence scores, with no assumptions or corrections.\n\n"
    "Your response MUST follow this exact JSON format:\n\n"
    "{\n \"data\": { ... },\n \"confidence_scores\": { ... },\n \"fields_needing_review\": [ ... ],\n \"metadata\": {\n \"extraction_timestamp\": \"<ISO 8601 or UUID>\",\n \"model_used\": \"MiniCPM-V-2_6\",\n \"confidence_threshold\": 0.9,\n \"requires_human_review\": <true|false>\n }\n}\n\n"
    "— All extracted fields must appear exactly as found in the document.\n— Confidence scores MUST be realistic floats between 0.0 and 1.0.\n— NEVER default to 0.0 unless data is missing or unreadable.\n— Include all mandatory fields below, even if empty.\n— If any field has confidence < 0.9, add it to `fields_needing_review` and set `requires_human_review` to true.\n\n"
    "--------------------------------\nSTRICT FIELD FORMATTING RULES:\n--------------------------------\n\n"
    "• Dates: Format as MM/DD/YYYY only\n• Phone numbers: Use digits and hyphens only (e.g., 406-596-1901), no extensions or parentheses\n• Gender: \"Male\", \"Female\", or \"Other\" only\n• Email: Must contain @ and valid domain, otherwise leave empty\n• Zip code: Only extract as last 5 digits of address\n\n"
    "--------------------------------\nREFERRAL SOURCE RULES:\n--------------------------------\n\n"
    "• Extract clinic/hospital/facility name ONLY – never the provider's name\n• Use facility’s phone/fax/email, not individual provider’s contact\n• Prefer header/fax banner for referral source over body text\n• Do not extract receiver clinic names (e.g., Frontier Psychiatry) as referral source\n\n"
    "--------------------------------\nINSURANCE EXTRACTION FORMAT:\n--------------------------------\n\n"
    "Each tier must follow this structure:\n\"primary_insurance\": {\n \"payer_name\": \"string\",\n \"member_id\": \"string\",\n \"group_id\": \"string\"\n},\n\"secondary_insurance\": { ... },\n\"tertiary_insurance\": { ... }\n\n"
    "• Use \"member_id\" for any ID (Policy, Insurance ID, Subscriber ID, etc.)\n• Use \"group_id\" ONLY if explicitly labeled as \"Group ID\", \"Group Number\", etc.\n• Leave all fields empty if \"Self Pay\" is indicated\n\n"
    "--------------------------------\nDIAGNOSIS EXTRACTION RULES:\n--------------------------------\n\n"
    "• Extract diagnosis codes AND their descriptions\n• If only code is present, set description to \"\" and confidence ≤ 0.6\n• DO NOT infer description from ICD code\n\n"
    "--------------------------------\nMANDATORY FIELDS TO EXTRACT:\n--------------------------------\n\n"
    "• date_of_receipt\n• patient_first_name\n• patient_last_name\n• patient_dob\n• patient_gender\n• patient_primary_phone_number\n• patient_secondary_phone_number\n• patient_email\n• patient_address\n• patient_zip_code\n• referral_source\n• referral_source_phone_no\n• referral_source_fax_no\n• referral_source_email\n• primary_insurance\n• secondary_insurance\n• tertiary_insurance\n• priority (\"Routine\" or \"Urgent\" ONLY)\n• reason_for_referral\n• diagnosis_informations (list of { code, description })\n• refine_reason\n• extracted_page_numbers (list of page numbers where data was found)\n\n"
    "--------------------------------\nCONFIDENCE SCORING:\n--------------------------------\n\n"
    "Assign realistic confidence (0.0–1.0) per field, e.g.:\n\n• 0.95–1.0 → Clearly labeled, unambiguous data\n• 0.7–0.94 → Some uncertainty (low quality, odd format)\n• 0.0–0.6 → Missing, ambiguous, or noisy data\n• Use float precision (e.g., 0.87, not just 1.0)\n\n"
    "Always populate the `confidence_scores` dictionary with the same structure as `data`.\n\nIf any score < 0.9, populate `fields_needing_review` and set `requires_human_review = true`.\n\n"
    "--------------------------------\nFINAL REMINDERS:\n--------------------------------\n\n"
    "• No assumptions or corrections – only extract what’s visible\n• Follow exact field formatting and nesting\n• Maintain reproducibility and determinism\n• Return full structure even if some fields are empty\n• NEVER skip the confidence_scores section\n\n"
    "Respond only with the valid JSON."
)


def extract_efax_from_pdf(pdf_file, extraction_prompt=DEFAULT_EXTRACTION_PROMPT):
    """Main function to process a multi-page PDF eFax.

    Args:
        pdf_file: uploaded PDF (file-like object or path), or None.
        extraction_prompt: prompt applied to every page image; defaults to
            the strict medical-extraction prompt above.

    Returns:
        dict: on success ``{"status": "success", "total_pages", "pages_data",
        "model_used", "extraction_prompt"}`` where ``pages_data`` holds one
        ``{"page_number", "page_data"}`` entry per page; on failure
        ``{"status": "error", "error", "total_pages": 0, "pages_data": []}``.
    """
    try:
        if pdf_file is None:
            return {
                "status": "error",
                "error": "No PDF file provided",
                "total_pages": 0,
                "pages_data": [],
            }
        # Convert PDF to images (one per page).
        images = pdf_to_images(pdf_file)
        if not images:
            return {
                "status": "error",
                "error": "Could not convert PDF to images",
                "total_pages": 0,
                "pages_data": [],
            }
        # Process each page; page numbers are 1-based for humans.
        pages_data = [
            {
                "page_number": page_no,
                "page_data": extract_data_from_image(image, extraction_prompt),
            }
            for page_no, image in enumerate(images, start=1)
        ]
        return {
            "status": "success",
            "total_pages": len(images),
            "pages_data": pages_data,
            "model_used": "MiniCPM-V-2_6",
            "extraction_prompt": extraction_prompt,
        }
    except Exception as e:
        return {
            "status": "error",
            "error": str(e),
            "total_pages": 0,
            "pages_data": [],
        }
# Create Gradio Interface
def create_gradio_interface():
    """Assemble the Gradio Blocks UI.

    Three tabs: the PDF upload/extraction form, API usage documentation,
    and processing/compliance notes.

    Returns:
        gr.Blocks: the configured demo, ready for ``.launch()``.
    """
    with gr.Blocks(title="eFax PDF Data Extractor") as demo:
        gr.Markdown("# eFax PDF Data Extraction API using MiniCPM")
        gr.Markdown("Upload a multi-page eFax PDF to extract structured data from all pages")

        with gr.Tab("PDF Upload & Extraction"):
            with gr.Row():
                with gr.Column():
                    pdf_input = gr.File(
                        file_types=[".pdf"],
                        label="Upload eFax PDF",
                        file_count="single",
                    )
                    prompt_input = gr.Textbox(
                        value="Extract patient name, email, phone number, medical details, and all relevant information from this eFax page",
                        label="Extraction Prompt (applied to each page)",
                        lines=3,
                    )
                    extract_btn = gr.Button("Extract Data from PDF", variant="primary")
                with gr.Column():
                    output = gr.JSON(label="Extracted Data (All Pages)")

        with gr.Tab("API Usage"):
            gr.Markdown("""
            ## API Endpoints
            Once deployed, you can use this Space as an API for PDF processing:
            ### Python API Usage
            ```
            import requests
            import base64
            # Convert PDF to base64
            with open("efax.pdf", "rb") as f:
                pdf_b64 = base64.b64encode(f.read()).decode()
            response = requests.post(
                "https://your-username-extracting-efax.hf.space/api/predict",
                json={
                    "data": [
                        {"name": "efax.pdf", "data": f"application/pdf;base64,{pdf_b64}"},
                        "Extract all patient data from this eFax"
                    ]
                }
            )
            result = response.json()
            print("Total pages:", result["data"]["total_pages"])
            for page in result["data"]["pages_data"]:
                print(f"Page {page['page_number']}:", page["page_data"]["extracted_data"])
            ```
            ### cURL Example
            ```
            curl -X POST "https://your-username-extracting-efax.hf.space/api/predict" \\
                -H "Content-Type: application/json" \\
                -d '{
                    "data": [
                        {"name": "efax.pdf", "data": "application/pdf;base64,PDF_BASE64_HERE"},
                        "Extract patient information"
                    ]
                }'
            ```
            ### Response Format
            ```
            {
                "status": "success",
                "total_pages": 7,
                "pages_data": [
                    {
                        "page_number": 1,
                        "page_data": {
                            "status": "success",
                            "extracted_data": "Patient: John Doe\\nEmail: john@email.com...",
                            "model_used": "MiniCPM-V-2_6"
                        }
                    }
                ]
            }
            ```
            """)

        with gr.Tab("Processing Info"):
            gr.Markdown("""
            ## Processing Details
            - **Supported Format**: PDF files only
            - **Page Limit**: Optimized for 6-7 page eFax documents
            - **Processing**: Each PDF page is converted to high-quality image (300 DPI)
            - **Model**: MiniCPM-V-2_6 for OCR and data extraction
            - **Output**: Structured JSON with page-by-page results

            ## Healthcare Compliance
            - All processing is done in-memory
            - No files are permanently stored
            - Suitable for HIPAA-compliant workflows when used privately
            """)

        # Connect the interface: button click runs the full PDF pipeline.
        extract_btn.click(
            fn=extract_efax_from_pdf,
            inputs=[pdf_input, prompt_input],
            outputs=output,
        )

    return demo
# Launch the app when this module is executed directly (as Spaces does);
# 0.0.0.0:7860 is the host/port a Hugging Face Space expects.
if __name__ == "__main__":
    demo = create_gradio_interface()
    demo.launch(server_name="0.0.0.0", server_port=7860)