hbatali2020 committed on
Commit
094b62f
·
verified ·
1 Parent(s): 57099f0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -93
app.py CHANGED
@@ -1,107 +1,73 @@
1
- from fastapi import FastAPI, UploadFile, File, HTTPException
2
  from PIL import Image
3
  import io
4
- from threading import Lock
5
- from datetime import datetime
 
6
 
7
- print("Starting API...")
8
 
9
- app = FastAPI()
 
 
10
 
11
- MODEL_ID = "microsoft/Florence-2-large"
12
- processor = None
13
- model = None
14
- torch = None
15
- AutoProcessor = None
16
- AutoModelForCausalLM = None
17
- model_lock = Lock()
18
- model_load_error = None
19
- model_loaded_at = None
20
 
21
- def ensure_model_loaded():
22
- global processor, model, torch, AutoProcessor, AutoModelForCausalLM, model_load_error, model_loaded_at
23
- if processor is not None and model is not None:
24
- return
25
- with model_lock:
26
- if processor is not None and model is not None:
27
- return
28
- print("Loading model...")
29
- try:
30
- if torch is None or AutoProcessor is None or AutoModelForCausalLM is None:
31
- import torch as _torch
32
- from transformers import AutoProcessor as _AutoProcessor, AutoModelForCausalLM as _AutoModelForCausalLM
33
- torch = _torch
34
- AutoProcessor = _AutoProcessor
35
- AutoModelForCausalLM = _AutoModelForCausalLM
36
 
37
- processor = AutoProcessor.from_pretrained(MODEL_ID)
38
- model = AutoModelForCausalLM.from_pretrained(
39
- MODEL_ID,
40
- torch_dtype=torch.float32,
41
- trust_remote_code=True
42
- )
43
- model.to("cpu")
44
- model.eval()
45
- model_loaded_at = datetime.utcnow().isoformat() + "Z"
46
- model_load_error = None
47
- print("Model loaded successfully")
48
- except Exception as exc:
49
- model_load_error = str(exc)
50
- print(f"Model loading failed: {model_load_error}")
51
- raise
52
-
53
- @app.get("/")
54
- def home():
55
- return {
56
- "status": "API running",
57
- "model_id": MODEL_ID,
58
- "model_loaded": processor is not None and model is not None,
59
- "model_loaded_at": model_loaded_at,
60
- "model_load_error": model_load_error
61
- }
62
-
63
- @app.get("/healthz")
64
- def healthz():
65
- return {"ok": True, "time": datetime.utcnow().isoformat() + "Z"}
66
-
67
- @app.post("/caption")
68
- async def caption(file: UploadFile = File(...)):
69
  try:
70
- ensure_model_loaded()
71
- except Exception as exc:
72
- raise HTTPException(status_code=500, detail=f"Model load failed: {exc}")
 
 
 
 
 
 
73
 
74
- image_bytes = await file.read()
75
- try:
76
- image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
77
- except Exception as exc:
78
- raise HTTPException(status_code=400, detail=f"Invalid image file: {exc}")
79
-
80
- prompt = "<CAPTION>"
 
 
 
81
 
82
- inputs = processor(
83
- text=prompt,
84
- images=image,
85
- return_tensors="pt"
86
- )
87
 
88
- with torch.inference_mode():
89
- generated_ids = model.generate(
90
- input_ids=inputs["input_ids"],
91
- pixel_values=inputs["pixel_values"],
92
- max_new_tokens=50
93
- )
 
 
 
 
94
 
95
- generated_text = processor.batch_decode(
96
- generated_ids,
97
- skip_special_tokens=True
98
- )[0]
99
 
100
- parsed = processor.post_process_generation(
101
- generated_text,
102
- task=prompt,
103
- image_size=(image.width, image.height)
104
- )
105
- caption = parsed.get(prompt, generated_text) if isinstance(parsed, dict) else str(parsed)
106
 
107
- return {"caption": caption}
 
 
 
1
import torch
from PIL import Image
import io
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import JSONResponse
from transformers import AutoProcessor, AutoModelForCausalLM

app = FastAPI(title="Florence-2-Logit-Shield")

# Model setup (comment translated from Arabic).
model_id = "microsoft/Florence-2-large"
device = "cpu"

print(f"⏳ Loading {model_id}...")
# Florence-2 requires trust_remote_code=True (translated from Arabic).
# NOTE(review): loading happens at import time, so startup blocks until the
# weights are downloaded and materialized on CPU.
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True).to(device).eval()
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

# Extract the token ids (Florence uses slightly different tokens) — we look up
# the "Yes" and "No" tokens in the model vocabulary (translated from Arabic).
# NOTE(review): convert_tokens_to_ids silently returns the unknown-token id
# when "Yes"/"No" are not single entries in this tokenizer's vocab — confirm
# these resolve to real vocab ids, otherwise the probabilities below are
# measured against the <unk> token.
YES_TOKEN_ID = processor.tokenizer.convert_tokens_to_ids("Yes")
NO_TOKEN_ID = processor.tokenizer.convert_tokens_to_ids("No")
 
 
 
 
 
 
 
 
 
 
 
23
 
24
@app.post("/test-image")
async def test_image(file: UploadFile = File(...)):
    """Score an uploaded image with Florence-2 and decide whether to blur it.

    Reads the raw upload bytes, runs a single forward pass with a fixed
    question prompt, and compares the probability mass the model places on
    the "Yes" token (at the final prompt position) against a fixed threshold.
    Any failure — bad image, model error — is reported as a 500 JSON payload
    instead of propagating.
    """
    try:
        raw_bytes = await file.read()
        pil_image = Image.open(io.BytesIO(raw_bytes)).convert("RGB")

        # Florence-2 is task-token driven; this uses a descriptive-QA prompt.
        prompt = "<VQA> Is there an adult woman or any adult female body part? Answer No if it is a child."

        # Tokenize + preprocess, then move all tensors to the configured device.
        model_inputs = processor(text=prompt, images=pil_image, return_tensors="pt").to(device)

        with torch.no_grad():
            # One forward pass only — we inspect logits, no text generation.
            outputs = model(**model_inputs)

        # Vocabulary distribution at the last prompt position (batch index 0).
        last_logits = outputs.logits[0, -1, :]
        vocab_probs = torch.softmax(last_logits, dim=-1)

        prob_yes = vocab_probs[YES_TOKEN_ID].item()
        prob_no = vocab_probs[NO_TOKEN_ID].item()

        # Decision threshold for Florence-2: start conservatively at 5%.
        THRESHOLD = 0.05
        is_unsafe = prob_yes > THRESHOLD

        return {
            "is_blur_needed": is_unsafe,
            "decision": "BLOCK" if is_unsafe else "ALLOW",
            "analysis": {
                "female_score": round(prob_yes * 100, 4),
                "no_score": round(prob_no * 100, 4),
                "threshold": THRESHOLD
            },
            "model": "Florence-2-large"
        }

    except Exception as e:
        return JSONResponse(status_code=500, content={"error": str(e)})
 
 
66
 
67
@app.get("/")
def home():
    """Landing endpoint: confirms the service is alive."""
    status_message = "Florence-2 Shield is Running"
    return {"message": status_message}
 
 
 
70
 
71
# Local/dev entrypoint: serve the API on all interfaces, port 7860
# (the conventional Hugging Face Spaces port).
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)