# FastAPI inference app for the R2Gen vision-encoder-decoder model (Hugging Face Space)
import os, io, time, contextlib, torch
from fastapi import FastAPI, UploadFile, File
from PIL import Image
from transformers import (VisionEncoderDecoderModel, AutoTokenizer, AutoImageProcessor,
BitsAndBytesConfig)
from huggingface_hub import login
# --- Configuration & model setup (runs once at import time) ---
# Authenticate with the Hub only when a token is provided; public models load
# without one, and a hard KeyError on a missing secret would kill the Space.
_hf_token = os.getenv("HF_TOKEN")
if _hf_token:
    login(token=_hf_token)

MODEL_ID = os.getenv("MODEL_ID", "Parsa2025AI/r2gen-swin-cerebras")
GEN_MAX_LEN = int(os.getenv("GEN_MAX_LEN", "192"))  # generation max_length (tokens)
NUM_BEAMS = int(os.getenv("NUM_BEAMS", "1"))        # 1 = greedy decoding

app = FastAPI(title="R2Gen API (FastAPI on Spaces)")

# 4-bit NF4 quantization + auto device map works on CPU or GPU Space;
# fp16 compute dtype keeps activations in half precision during inference.
bnb = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)
image_processor = AutoImageProcessor.from_pretrained(MODEL_ID)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = VisionEncoderDecoderModel.from_pretrained(
    MODEL_ID, quantization_config=bnb, device_map="auto", offload_folder="/data/offload"
)
model.eval()

# Backfill generation token IDs from the tokenizer when the model config
# lacks them — generate() needs both for padding and stop conditions.
if model.config.pad_token_id is None and tokenizer.pad_token_id is not None:
    model.config.pad_token_id = tokenizer.pad_token_id
if model.config.eos_token_id is None and tokenizer.eos_token_id is not None:
    model.config.eos_token_id = tokenizer.eos_token_id
@app.get("/health")
def health():
    """Liveness probe: report service status and the configured model id."""
    payload = {"ok": True, "model": MODEL_ID}
    return payload
@app.post("/generate")
def generate(file: UploadFile = File(...)):
    """Generate a report for an uploaded image.

    Returns ``{"text": <decoded output>, "ms": <generation latency in ms>}``.
    """
    img = Image.open(io.BytesIO(file.file.read())).convert("RGB")
    inputs = image_processor(img, return_tensors="pt")

    # Match the encoder's device always, but its dtype only when it is a
    # floating-point dtype. Under 4-bit quantization the first encoder
    # parameter can be a packed integer tensor (e.g. uint8); casting pixel
    # values to it would corrupt them, so fall back to the fp16 compute
    # dtype configured in BitsAndBytesConfig.
    enc_param = next(model.encoder.parameters())
    target_dtype = enc_param.dtype if enc_param.dtype.is_floating_point else torch.float16
    pixel_values = inputs.pixel_values.to(device=enc_param.device, dtype=target_dtype)

    gen_kwargs = dict(
        max_length=GEN_MAX_LEN,
        num_beams=NUM_BEAMS,
        pad_token_id=model.config.pad_token_id,
        eos_token_id=model.config.eos_token_id,
    )

    t0 = time.perf_counter()  # monotonic clock: immune to system-time jumps
    with torch.inference_mode():
        # Autocast only on CUDA with a half-precision float target.
        use_amp = (enc_param.device.type == "cuda"
                   and target_dtype in (torch.float16, torch.bfloat16))
        ctx = torch.autocast("cuda", dtype=target_dtype) if use_amp else contextlib.nullcontext()
        with ctx:
            out = model.generate(pixel_values=pixel_values, **gen_kwargs)
    text = tokenizer.decode(out[0], skip_special_tokens=True).strip()
    return {"text": text, "ms": int((time.perf_counter() - t0) * 1000)}