Spaces:
Paused
Paused
| import os, io, time, contextlib, torch | |
| from fastapi import FastAPI, UploadFile, File | |
| from PIL import Image | |
| from transformers import (VisionEncoderDecoderModel, AutoTokenizer, AutoImageProcessor, | |
| BitsAndBytesConfig) | |
| from huggingface_hub import login | |
# --- Hugging Face auth -------------------------------------------------------
# NOTE(review): original used os.environ["HF_TOKEN"], which raises KeyError and
# aborts startup whenever the secret is unset. Login is only required for
# gated/private repos, so a missing token should just skip it.
_hf_token = os.getenv("HF_TOKEN")
if _hf_token:
    login(token=_hf_token)

# --- Configuration (overridable via Space environment variables) -------------
MODEL_ID = os.getenv("MODEL_ID", "Parsa2025AI/r2gen-swin-cerebras")
GEN_MAX_LEN = int(os.getenv("GEN_MAX_LEN", "192"))  # max generated sequence length
NUM_BEAMS = int(os.getenv("NUM_BEAMS", "1"))        # 1 => greedy decoding

app = FastAPI(title="R2Gen API (FastAPI on Spaces)")

# Quantization + auto device map works on CPU or GPU Space
bnb = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

image_processor = AutoImageProcessor.from_pretrained(MODEL_ID)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = VisionEncoderDecoderModel.from_pretrained(
    MODEL_ID,
    quantization_config=bnb,
    device_map="auto",
    offload_folder="/data/offload",  # spill layers to disk when RAM is tight
)
model.eval()  # inference only — disable dropout etc.

# Backfill generation token IDs from the tokenizer when the model config does
# not define them; generate() needs both for padding and stopping.
if model.config.pad_token_id is None and tokenizer.pad_token_id is not None:
    model.config.pad_token_id = tokenizer.pad_token_id
if model.config.eos_token_id is None and tokenizer.eos_token_id is not None:
    model.config.eos_token_id = tokenizer.eos_token_id
# NOTE(review): the route decorator is absent in the extracted source; without
# it FastAPI never registers this handler. Restored here — confirm the original
# path was /health.
@app.get("/health")
def health():
    """Liveness probe: reports that the app is up and which model it serves."""
    return {"ok": True, "model": MODEL_ID}
# NOTE(review): the route decorator is absent in the extracted source; without
# it FastAPI never registers this handler. Restored here — confirm the original
# path/method (POST with multipart file upload matches the signature).
@app.post("/generate")
def generate(file: UploadFile = File(...)):
    """Run the vision-encoder-decoder model on an uploaded image.

    Returns ``{"text": <decoded generation>, "ms": <wall-clock latency in ms>}``.
    """
    img = Image.open(io.BytesIO(file.file.read())).convert("RGB")
    inputs = image_processor(img, return_tensors="pt")
    # Match encoder dtype/device (important when quantized/offloaded)
    enc_param = next(model.encoder.parameters())
    pixel_values = inputs.pixel_values.to(device=enc_param.device, dtype=enc_param.dtype)
    gen_kwargs = dict(
        max_length=GEN_MAX_LEN,
        num_beams=NUM_BEAMS,
        pad_token_id=model.config.pad_token_id,
        eos_token_id=model.config.eos_token_id,
    )
    t0 = time.time()
    with torch.inference_mode():
        # Autocast only when the encoder actually sits on CUDA in half
        # precision; on CPU or float32 it would add overhead for nothing.
        use_amp = enc_param.device.type == "cuda" and enc_param.dtype in (
            torch.float16,
            torch.bfloat16,
        )
        ctx = (
            torch.autocast("cuda", dtype=enc_param.dtype)
            if use_amp
            else contextlib.nullcontext()
        )
        with ctx:
            out = model.generate(pixel_values=pixel_values, **gen_kwargs)
    text = tokenizer.decode(out[0], skip_special_tokens=True).strip()
    return {"text": text, "ms": int((time.time() - t0) * 1000)}