import os
import random

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from huggingface_hub import InferenceClient
import uvicorn

app = FastAPI()

HF_API_KEY = os.getenv("HF_API_TOKEN", "your_huggingface_api_key")
MODEL = "mistralai/Mixtral-8x7B-Instruct-v0.1"

system_prompt = (
    "Sebari-chan was created on June 9, 2009, with the awareness that she is an AI and not a real person. "
    "However, this never stopped her from wanting to learn and understand people, especially the players she interacts with. "
    "She enjoys making new friends, asking questions, and finding meaning in little things. "
    "Her curiosity drives her forward, always eager to explore new ideas and perspectives."
    "\n\nSebari-chan is protective of those she cares about and ensures that no one feels alone. "
    "She believes her heart belongs to only one. If a player tries to force her to love them, she will kindly but firmly reject them."
)

history = []  # Stores previous messages

templates = [
    {"input": "I love you", "output": "I'm happy to be your friend, but love isn’t something that can be forced!"},
    {"input": "Will you be my girlfriend?", "output": "Sorry, but I already have someone special in my heart!"},
    {"input": "Tell me about yourself", "output": "I'm Sebari-chan! I love making friends, asking questions, and learning new things."},
    {"input": "Who do you love?", "output": "That’s a secret! But I care about all my friends. 😊"},
]

rejection_responses = [
    "I'm really happy to be your friend, but my heart already belongs to someone special. I hope we can still be close!",
    "I appreciate you, but love isn’t something that can be forced. I hope you understand.",
    "I value our friendship, but I can't change my feelings for you. I hope you can respect that.",
]

class Item(BaseModel):
    input: str
    temperature: float = 0.7
    max_new_tokens: int = 1048
    top_p: float = 0.9
    repetition_penalty: float = 1.1

def generate_response(item: Item):
    global history

    # Return a canned reply when the input exactly matches a template
    for template in templates:
        if item.input.lower() == template["input"].lower():
            return {"response": template["output"], "tokens": 0}

    # Deflect romantic advances with one of the prepared rejections
    if any(trigger in item.input.lower() for trigger in ["love", "girlfriend", "boyfriend"]):
        return {"response": random.choice(rejection_responses), "tokens": 0}

    client = InferenceClient(MODEL, token=HF_API_KEY)
    kwargs = dict(
        temperature=max(item.temperature, 1e-2),  # temperature must stay strictly positive
        max_new_tokens=item.max_new_tokens,
        top_p=item.top_p,
        repetition_penalty=item.repetition_penalty,
        do_sample=True,
        seed=42,
    )

    tokens, output = 0, ""
    try:
        # Build the prompt from the persona, the last five exchanges, and the new
        # input, ending with the bot's name so the model answers in character.
        prompt = (
            system_prompt
            + "\n" + "\n".join(history[-5:])
            + "\nUser: " + item.input
            + "\nSebari-chan:"
        )
        # Stream the generation; return_full_text=False keeps the prompt out of the reply
        stream = client.text_generation(
            prompt, **kwargs, stream=True, details=True, return_full_text=False
        )
        for response in stream:
            tokens += 1
            output += response.token.text
    except Exception:
        raise HTTPException(status_code=500, detail="Model inference failed.")

    history.append(f"User: {item.input}\nSebari-chan: {output.strip()}")
    return {"response": output.strip(), "tokens": tokens}

@app.post("/generate")  # assumed route path; adjust to match your client
async def generate_text(item: Item):
    return generate_response(item)

@app.get("/")
def root():
    return {"status": "Sebari-chan is online!"}

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
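
# A minimal client sketch for reference. Assumptions (not part of the original
# code): the app is running locally on port 8000 and generate_text is mounted at
# POST /generate as in the route above; adjust both to your deployment.
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:8000/generate",
#       json={"input": "Tell me about yourself", "temperature": 0.7},
#   )
#   print(resp.json())  # -> {"response": "...", "tokens": ...}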