from fastapi import FastAPI
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "rasyosef/Phi-1_5-Instruct-v0.1"

# Load the tokenizer and model once at import time so every request reuses them.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
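# Optional note (an assumption, not from the original): on a GPU machine the
# model could instead be loaded with
#   AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype="auto", device_map="auto")
# (device_map="auto" requires the `accelerate` package), in which case request
# tensors must be moved with inputs.to(model.device) before generate().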
|
|
app = FastAPI()
|
|
@app.get("/chat")
def chat(query: str):
    """
    GET /chat?query=Your+question
    Returns JSON: {"answer": "...model's reply..."}
    """
    # Build a ChatML-style prompt by hand; note the newline after each
    # <|im_end|> marker, which the standard ChatML format expects.
    prompt = (
        "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
        "<|im_start|>user\n" + query + "<|im_end|>\n"
        "<|im_start|>assistant\n"
    )
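    # Sketch of an alternative (assumes the model repo ships a chat template,
    # as instruct fine-tunes usually do): let the tokenizer build the prompt
    # instead of hand-writing the markers:
    #
    #   messages = [
    #       {"role": "system", "content": "You are a helpful assistant."},
    #       {"role": "user", "content": query},
    #   ]
    #   prompt = tokenizer.apply_chat_template(
    #       messages, tokenize=False, add_generation_prompt=True
    #   )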
    inputs = tokenizer(prompt, return_tensors="pt")
    # Greedy decoding by default, capped at 200 newly generated tokens.
    outputs = model.generate(**inputs, max_new_tokens=200)

    # Decode only the tokens generated after the prompt.
    response = tokenizer.decode(
        outputs[0][inputs.input_ids.shape[-1]:],
        skip_special_tokens=True,
    )
    return {"answer": response.strip()}