from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

app = FastAPI()

MODEL_ID = "nairanu6115/phi3-mini"

print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

print("Loading model...")
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    device_map="auto",
)
model.eval()
print("Model ready.")

class Request(BaseModel):
    prompt: str
    max_tokens: int = 300


@app.get("/health")  # health-check route (path assumed from the handler name)
def health():
    return {"status": "ok", "model": MODEL_ID}

@app.post("/generate")  # generation route (path assumed from the handler name)
def generate(req: Request):
    # Phi-3 chat template
    formatted = f"<|user|>\n{req.prompt}<|end|>\n<|assistant|>\n"
    inputs = tokenizer(formatted, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=req.max_tokens,
            temperature=0.7,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Keep special tokens in the decode so the <|assistant|> marker survives
    # and the reply can be split out of the full text below.
    full_text = tokenizer.decode(outputs[0], skip_special_tokens=False)
    # Extract only the assistant's reply
    response = full_text.split("<|assistant|>")[-1].strip() if "<|assistant|>" in full_text else full_text
    response = response.replace("<|end|>", "").replace("<|endoftext|>", "").replace("<|user|>", "").strip()
    return {"response": response}
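

# A minimal client sketch for exercising the endpoints above, kept as comments
# so this file stays a server-only module. Assumptions: the app is served on
# port 7860 (the Hugging Face Spaces default) and is reachable from the caller;
# adjust the base URL for your own deployment.
#
#     import requests
#
#     base = "http://localhost:7860"
#     print(requests.get(f"{base}/health").json())
#     payload = {"prompt": "Explain what a tokenizer does.", "max_tokens": 150}
#     print(requests.post(f"{base}/generate", json=payload).json()["response"])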