from fastapi import FastAPI, Header, HTTPException
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

app = FastAPI()

# -----------------------------
# PUBLIC MODEL (NO HF TOKEN)
# -----------------------------
MODEL_NAME = "LiquidAI/LFM2.5-1.2B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    device_map="auto",
    torch_dtype=torch.float16,
)

# -----------------------------
# YOUR CUSTOM API KEY
# -----------------------------
API_KEY = "Model12134-1344"

class Prompt(BaseModel):
    message: str

@app.post("/chat")
def chat(prompt: Prompt, x_api_key: str = Header(None)):
    # Reject any request whose X-API-Key header does not match our key.
    if x_api_key != API_KEY:
        raise HTTPException(status_code=403, detail="Invalid API key")

    # Tokenize the prompt and move it to the same device as the model.
    inputs = tokenizer(prompt.message, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=256,
        temperature=0.3,
        do_sample=True,
    )

    # Decode only the newly generated tokens, skipping the echoed prompt.
    response = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[-1]:],
        skip_special_tokens=True,
    )
    return {"response": response}
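# -----------------------------
# CALLING THE ENDPOINT (sketch)
# -----------------------------
# A minimal client sketch, not part of the app above. It assumes the server
# was started locally with `uvicorn app:app --port 8000` (the module name
# "app" and the port are assumptions) and that the route is mounted at
# POST /chat as defined above.

import requests

resp = requests.post(
    "http://localhost:8000/chat",          # hypothetical local URL; adjust as needed
    headers={"x-api-key": "Model12134-1344"},  # must match API_KEY on the server
    json={"message": "Say hello in one sentence."},
)
resp.raise_for_status()
print(resp.json()["response"])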