File size: 1,177 Bytes
1328239
 
 
62eb5a2
 
1328239
62eb5a2
 
 
 
 
 
 
 
1328239
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
from typing import Optional

from fastapi import FastAPI, Form

import torch
from transformers import pipeline

# ASGI application instance; the route decorators below attach endpoints to it.
app = FastAPI()
# Text-generation pipeline created once at import time (model download/load happens here,
# so first startup is slow). bfloat16 + device_map="auto" lets the weights be placed on
# GPU/CPU automatically — presumably `accelerate` is installed for device_map; verify.
pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.bfloat16, device_map="auto")

@app.get("/")
def read_root():
    """Root endpoint: return a static greeting (useful as a liveness check)."""
    greeting = {"message": "Hello, World!"}
    return greeting

@app.get("/items/{item_id}")
def read_item(item_id: int, query_param: Optional[str] = None):
    """Echo back the path item id and an optional query parameter.

    Args:
        item_id: Integer taken from the URL path.
        query_param: Optional query-string value; ``None`` when absent.

    Returns:
        Dict with both values, serialized to JSON by FastAPI.
    """
    # str = None was an incorrect annotation (None is not a str); Optional[str]
    # keeps runtime behavior identical and fixes type checking / OpenAPI schema.
    return {"item_id": item_id, "query_param": query_param}

@app.post("/chat")
async def chat(sentence: str = Form(...)):
    """Generate a pirate-style reply to *sentence* with the local TinyLlama model.

    Args:
        sentence: User message from a form field; the literal "quit"
            (case-insensitive, surrounding whitespace ignored) ends the session.

    Returns:
        Dict with a single "response" key containing the model's reply.
    """
    # strip() so " quit " / "QUIT\n" also end the session.
    if sentence.strip().lower() == "quit":
        return {"response": "Chatbot session ended."}

    messages = [
        {
            "role": "system",
            "content": "You are a friendly chatbot who always responds in the style of a pirate",
        },
        {"role": "user", "content": sentence},
    ]
    # Render the chat messages into the model's expected prompt format.
    prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    # return_full_text=False: the text-generation pipeline otherwise prepends the
    # whole prompt (system message + template markup) to generated_text, which
    # would be echoed back to the client. Only the newly generated reply is wanted.
    outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95, return_full_text=False)
    text = outputs[0]["generated_text"]

    return {"response": text}