```python
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from huggingface_hub import InferenceClient

app = FastAPI()
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")


class PromptRequest(BaseModel):
    prompt: str
    history: list  # pairs of [user_prompt, bot_response]


def format_prompt(message, history):
    # Build a Mistral-instruct prompt: the system prompt and every
    # conversation turn are wrapped in [INST] ... [/INST] tags.
    system_prompt = "You are Mistral, a gentle and helpful AI assistant."
    prompt = "<s>"
    prompt += f"[INST] {system_prompt} [/INST]"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt


def generate(prompt, history):
    # Stream tokens from the Inference API and yield the accumulated
    # text after each new token arrives.
    formatted_prompt = format_prompt(prompt, history)
    stream = client.text_generation(
        formatted_prompt, stream=True, details=True, return_full_text=False
    )
    output = ""
    for response in stream:
        output += response.token.text
        yield output


@app.post("/generate")
def generate_response(request: PromptRequest):
    # generate() is a plain synchronous generator, so it is consumed
    # with a regular for loop ('async for' would raise a TypeError).
    # Declaring the endpoint as a regular def lets FastAPI run the
    # blocking streaming call in a worker thread.
    try:
        responses = []
        for response in generate(request.prompt, request.history):
            responses.append(response)
        return {"responses": responses}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
```
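For a quick check of the endpoint, a minimal client call could look like the sketch below. It assumes the app above is saved as `main.py` and served with `uvicorn main:app --port 8000`; the module name, port, and the `/generate` route are assumptions of this sketch, not part of the original Space.

```python
# Hypothetical client call; main.py, port 8000, and the /generate
# route are assumptions used for illustration.
import requests

resp = requests.post(
    "http://localhost:8000/generate",
    json={"prompt": "What can you do?", "history": []},
)
resp.raise_for_status()
# The endpoint returns every intermediate snapshot of the stream,
# so the last entry holds the complete generation.
print(resp.json()["responses"][-1])
```

Note that returning every intermediate snapshot makes the response payload grow quadratically with output length; if the partial outputs are not needed, yielding only the final string (or streaming tokens to the client with FastAPI's `StreamingResponse`) would be leaner.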