from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import uvicorn

MODEL_NAME = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"

# Load the tokenizer and model once at startup so all requests share one copy.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

# Run on GPU when available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

app = FastAPI()


class ChatReq(BaseModel):
    message: str


@app.get("/")
async def root():
    return {"message": "AI API is running"}


# Declared as a plain `def` (not `async def`) so FastAPI runs it in its
# threadpool: model.generate() is a blocking call and would otherwise stall
# the event loop for every concurrent request.
@app.post("/chat")
def chat(data: ChatReq):
    inputs = tokenizer(data.message, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=128,
            do_sample=True,
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode only the newly generated tokens; outputs[0] also contains the
    # prompt tokens, which would otherwise be echoed back in the response.
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    res = tokenizer.decode(new_tokens, skip_special_tokens=True)
    return {"response": res}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
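
# A minimal smoke test for the /chat endpoint, as a sketch: it assumes the
# server above is running locally on port 7860 (matching the uvicorn.run call)
# and that the `requests` package is installed, which is not a dependency of
# the server itself. Run it from a separate Python process:
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:7860/chat",
#       json={"message": "What is FastAPI?"},
#       timeout=120,  # CPU generation of 128 tokens can take a while
#   )
#   resp.raise_for_status()
#   print(resp.json()["response"])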