from fastapi import FastAPI
from pydantic import BaseModel
from huggingface_hub import InferenceClient
import uvicorn
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, set_seed

app = FastAPI()

# Load the base model and tokenizer once at startup; device_map="auto"
# places the weights on whatever GPU(s) are available.
model_id = "mistralai/Mistral-7B-v0.1"
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_id)
#client = InferenceClient("HFHAB/FinetunedMistralModel")
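# BitsAndBytesConfig is imported above but not used on the live path; one
# possible use, if GPU memory is tight, is 4-bit loading (sketch, assumes
# the bitsandbytes package is installed):
#   bnb_config = BitsAndBytesConfig(load_in_4bit=True)
#   model = AutoModelForCausalLM.from_pretrained(
#       model_id, device_map="auto", quantization_config=bnb_config)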
class Item(BaseModel):
    prompt: str
    history: list
    system_prompt: str
    temperature: float = 0.3
    max_new_tokens: int = 5000
    top_p: float = 0.15
    repetition_penalty: float = 1.0
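# Example request body for the /generate/ endpoint (illustrative values);
# `history` is a list of [user_turn, bot_reply] pairs:
#   {"prompt": "What is FastAPI?", "history": [], "system_prompt": "Answer briefly"}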
def format_prompt(message, history):
    # Build a Mistral-instruct prompt: each history turn becomes an
    # [INST] ... [/INST] block followed by the bot's reply.
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response} "
    prompt += f"</s>[INST] {message} [/INST]"
    return prompt
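# Example (illustrative): with history=[("Hi", "Hello!")] and message
# "How are you?", format_prompt returns:
#   "<s>[INST] Hi [/INST] Hello! </s>[INST] How are you? [/INST]"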
def formatting_func(example):
    # Prompt template for fine-tuning on input/output pairs (not called by the API).
    text = f"### Question: {example['input']}\n ### Answer: {example['output']}"
    return text
def generate(item: Item):
    # Clamp temperature away from zero so sampling stays numerically stable.
    temperature = float(item.temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(item.top_p)

    set_seed(42)  # fixed seed so repeated requests sample the same tokens
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=item.max_new_tokens,
        top_p=top_p,
        top_k=40,
        repetition_penalty=item.repetition_penalty,
        do_sample=True,
    )

    formatted_prompt = format_prompt(f"{item.system_prompt}, {item.prompt}", item.history)
    inputs = tokenizer(formatted_prompt, return_tensors="pt").to(model.device)
    out = model.generate(**inputs, **generate_kwargs)
    output = tokenizer.decode(out[0], skip_special_tokens=True)

    # Alternative: stream tokens from the hosted Inference API instead of
    # generating locally (requires the InferenceClient line above).
    #stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    #output = ""
    #for response in stream:
    #    output += response.token.text
    return output
@app.post("/generate/")
async def generate_text(item: Item):
    return {"response": generate(item)}