# NOTE: "Spaces: Running" page-status header from the Hugging Face Spaces
# file viewer — extraction artifact, not part of the application source.
# --- flash_attn mock -------------------------------------------------
# Florence-2's remote modeling code imports flash_attn; register stub
# modules up front so those imports succeed on hardware without it.
import sys
import types
import importlib.util

_flash_stub = types.ModuleType("flash_attn")
_flash_stub.__version__ = "2.0.0"
_flash_stub.__spec__ = importlib.util.spec_from_loader("flash_attn", loader=None)
sys.modules["flash_attn"] = _flash_stub
for _name in ("flash_attn.flash_attn_interface", "flash_attn.bert_padding"):
    sys.modules[_name] = types.ModuleType(_name)
# ---------------------------------------------------------------------
| import io | |
| import time | |
| import httpx | |
| import torch | |
| from PIL import Image | |
| from transformers import AutoProcessor, AutoModelForCausalLM | |
| from fastapi import FastAPI, HTTPException, UploadFile, File | |
| from fastapi.middleware.cors import CORSMiddleware | |
| from fastapi.responses import JSONResponse | |
| from pydantic import BaseModel | |
| from contextlib import asynccontextmanager | |
| from typing import Optional | |
# Hugging Face model id of the vision-language model used for detection.
MODEL_ID = "microsoft/Florence-2-large-ft"

# Fixed VQA prompt; the decision logic downstream expects a bare yes/no answer.
VQA_QUESTION = (
    "Is there a woman or any part of a woman's body in this image? Answer yes or no only."
)

# Filled by the lifespan hook at startup with keys "processor" and "model";
# cleared again at shutdown.
MODEL_DATA = {}
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Load the Florence-2 processor/model at startup, release them at shutdown.

    FastAPI's ``lifespan=`` argument expects an async context manager, so the
    ``@asynccontextmanager`` decorator is required — without it the bare async
    generator is not a valid lifespan and the app fails at startup.  (The
    decorator was missing here even though ``asynccontextmanager`` is imported
    above; restored.)
    """
    print(f"Loading {MODEL_ID}...")
    start = time.time()
    # trust_remote_code: Florence-2 ships custom modeling/processing code.
    MODEL_DATA["processor"] = AutoProcessor.from_pretrained(
        MODEL_ID, trust_remote_code=True
    )
    MODEL_DATA["model"] = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        torch_dtype=torch.float32,      # CPU-safe dtype
        trust_remote_code=True,
        attn_implementation="eager",    # avoid flash-attn (mocked at import time)
    ).eval()
    print(f"Model ready in {time.time()-start:.1f}s")
    yield
    # Shutdown: drop references so the model/processor can be garbage-collected.
    MODEL_DATA.clear()
# FastAPI application; the lifespan hook loads/unloads the model.
app = FastAPI(
    title="AI Shield - Female Detection API",
    description="Florence-2-large-ft | VQA | Compatible with AI Shield Chrome Extension",
    version="5.0.0",
    lifespan=lifespan
)

# --- CORS: required so the Chrome extension can call this API --------
# Wide-open policy (all origins/methods/headers) — intentional for the
# extension use case.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# --- Schemas ---------------------------------------------------------
class ImageUrlRequest(BaseModel):
    # Request body sent by the Chrome extension: {"image_url": "https://..."}
    image_url: str  # URL of the image to analyze
# --- Shared analysis helper ------------------------------------------
def _decide(answer: str) -> tuple:
    """Map the raw VQA answer to a ``(decision, reason)`` pair.

    Only an answer whose first word is exactly "no" yields ALLOW; anything
    containing "yes" yields BLOCK, and any other answer blocks as a
    fail-safe.  (The previous ``startswith("no")`` check wrongly allowed
    answers such as "not sure", defeating the stated fail-safe default.)
    """
    lowered = answer.lower()
    words = lowered.split()
    first = words[0].strip(".,!") if words else ""
    if first == "no":
        return "ALLOW", "model_answered_no"
    if "yes" in lowered:
        return "BLOCK", "model_answered_yes"
    return "BLOCK", "unexpected_answer_blocked_for_safety"


def analyze_image(image: Image.Image) -> dict:
    """Run the fixed yes/no VQA prompt against *image* and return the verdict.

    Requires the lifespan hook to have populated MODEL_DATA.  Returns a dict
    with keys: decision ("ALLOW" | "BLOCK" — upper-case to match the Chrome
    extension), reason, vqa_answer, execution_time (seconds), status.
    """
    processor = MODEL_DATA["processor"]
    model = MODEL_DATA["model"]

    task = "<VQA>"
    prompt = f"{task}{VQA_QUESTION}"
    inputs = processor(text=prompt, images=image, return_tensors="pt")

    start_time = time.time()
    with torch.no_grad():
        generated_ids = model.generate(
            input_ids=inputs["input_ids"],
            pixel_values=inputs["pixel_values"],
            max_new_tokens=10,   # yes/no answer — a few tokens suffice
            num_beams=3,
            do_sample=False,     # deterministic decoding
        )
    # skip_special_tokens=False: post_process_generation needs the task markers.
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
    parsed = processor.post_process_generation(
        generated_text,
        task=task,
        image_size=(image.width, image.height),
    )
    elapsed = round(time.time() - start_time, 2)
    answer = parsed.get(task, "").strip()

    decision, reason = _decide(answer)
    return {
        "decision": decision,
        "reason": reason,
        "vqa_answer": answer,
        "execution_time": elapsed,
        "status": "success",
    }
# --- Health check ----------------------------------------------------
# NOTE(review): no route decorator was visible in the extracted source —
# without one this endpoint is never registered.  Restored the conventional
# @app.get("/health"); confirm the path against the deployed extension.
@app.get("/health")
def health():
    """Liveness probe: reports whether the lifespan hook loaded the model."""
    return {"status": "ok", "model_loaded": "model" in MODEL_DATA}
# --- Endpoint 1: from the Chrome extension (image_url) ---------------
# background.js sends: POST /analyze {"image_url": "https://..."}
# NOTE(review): the route decorator was missing in the extracted source;
# restored as POST /analyze per the comment above.
@app.post("/analyze")
async def analyze_from_url(request: ImageUrlRequest):
    """Download the image at ``request.image_url`` and run the analysis.

    Raises HTTPException(400) if the download or image decode fails.
    (Original Arabic error strings were garbled by extraction; rewritten
    in English.)
    """
    try:
        async with httpx.AsyncClient(timeout=30) as client:
            response = await client.get(request.image_url)
            response.raise_for_status()
            image_bytes = response.content
    except Exception as e:
        # Boundary handler: any download failure becomes a client-visible 400.
        raise HTTPException(status_code=400, detail=f"Failed to download image from URL: {str(e)}")

    try:
        image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Failed to read image: {str(e)}")

    return analyze_image(image)
# --- Endpoint 2: manual testing (file upload) ------------------------
# NOTE(review): the route decorator was missing in the extracted source;
# restored as POST /analyze/file — TODO confirm the original path.
@app.post("/analyze/file")
async def analyze_from_file(file: UploadFile = File(...)):
    """Accept an uploaded image file and run the analysis.

    Raises HTTPException(400) for non-image uploads or unreadable images.
    (Original Arabic error strings were garbled by extraction; rewritten
    in English.)
    """
    # content_type can be None on a malformed upload; guard before startswith
    # to avoid an AttributeError (500) and return a proper 400 instead.
    if not (file.content_type or "").startswith("image/"):
        raise HTTPException(status_code=400, detail="Uploaded file is not an image")
    try:
        image = Image.open(io.BytesIO(await file.read())).convert("RGB")
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Failed to read image: {str(e)}")
    return analyze_image(image)
if __name__ == "__main__":
    # Dev/standalone entry point; port 7860 is the conventional
    # Hugging Face Spaces port.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)