chipling committed on
Commit
9ceea25
·
verified ·
1 Parent(s): 2fd0c28

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -70
app.py CHANGED
@@ -1,70 +1,29 @@
1
- import os
2
- import shutil
3
- import tempfile
4
- import json
5
- from fastapi import FastAPI, UploadFile, File, HTTPException
6
-
7
- # --- CPU OPTIMIZATION FLAGS ---
8
- # Limit threads for HF Free Tier (2 vCPUs)
9
- os.environ["OMP_NUM_THREADS"] = "2"
10
-
11
- # THE MAGIC FIX: Disable the buggy PIR compiler but KEEP MKLDNN!
12
- os.environ["FLAGS_enable_pir_api"] = "0"
13
- os.environ["FLAGS_use_mkldnn"] = "1"
14
-
15
- # IMPORT THE NEW V3 PIPELINE (Must be imported AFTER setting the flags)
16
- from paddleocr import PPStructureV3
17
-
18
- app = FastAPI(
19
- title="Document Ingestion API",
20
- description="Lightweight PP-StructureV3 extraction"
21
- )
22
-
23
- print("Initializing PP-StructureV3 (MKLDNN Enabled via AST Executor)...")
24
- pipeline = PPStructureV3()
25
- print("Pipeline ready!")
26
-
27
- @app.post("/ingest")
28
- async def ingest_document(file: UploadFile = File(...)):
29
- if not file.filename:
30
- raise HTTPException(status_code=400, detail="No file provided")
31
-
32
- with tempfile.TemporaryDirectory() as temp_dir:
33
- file_path = os.path.join(temp_dir, file.filename)
34
-
35
- try:
36
- # 1. Save file
37
- with open(file_path, "wb") as buffer:
38
- shutil.copyfileobj(file.file, buffer)
39
-
40
- # 2. Predict
41
- output = pipeline.predict(file_path)
42
-
43
- parsed_pages = []
44
- for page_num, res in enumerate(output):
45
- md_path = os.path.join(temp_dir, f"page_{page_num + 1}.md")
46
- json_path = os.path.join(temp_dir, f"page_{page_num + 1}.json")
47
-
48
- res.save_to_markdown(save_path=md_path)
49
- res.save_to_json(save_path=json_path)
50
-
51
- with open(md_path, "r", encoding="utf-8") as f:
52
- md_content = f.read()
53
-
54
- with open(json_path, "r", encoding="utf-8") as f:
55
- json_content = json.load(f)
56
-
57
- parsed_pages.append({
58
- "page": page_num + 1,
59
- "markdown": md_content,
60
- "json_data": json_content
61
- })
62
-
63
- return {
64
- "status": "success",
65
- "filename": file.filename,
66
- "data": parsed_pages
67
- }
68
-
69
- except Exception as e:
70
- raise HTTPException(status_code=500, detail=str(e))
 
1
+ from fastapi import FastAPI, UploadFile, File
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ from PIL import Image
4
+ import torch
5
+ import io
6
+
7
+ app = FastAPI()
8
+
9
+ # Load model (optimized for CPU/Low RAM)
10
+ model_id = "THUDM/glm-4v-9b" # Or the specific 0.9B GLM-OCR variant
11
+ tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
12
+ model = model_id.from_pretrained(model_id, trust_remote_code=True).cpu().eval()
13
+
14
+ @app.post("/convert")
15
+ async def convert_image(file: UploadFile = File(...)):
16
+ image_data = await file.read()
17
+ image = Image.open(io.BytesIO(image_data)).convert("RGB")
18
+
19
+ # Specific prompt to trigger Chart-to-HTML
20
+ prompt = "Read this chart and output the data as a clean HTML table with headers."
21
+
22
+ inputs = tokenizer.apply_chat_template([{"role": "user", "image": image, "content": prompt}],
23
+ add_generation_prompt=True, tokenize=True, return_tensors="pt")
24
+
25
+ with torch.no_grad():
26
+ outputs = model.generate(**inputs, max_new_tokens=1000)
27
+
28
+ response = tokenizer.decode(outputs[0])
29
+ return {"html_result": response}