# AniSol-API / app.py
# (Hugging Face Space header — uploaded by nellaep, commit 518895e,
#  "Rename anisol.py to app.py")
# -*- coding: utf-8 -*-
"""AniSol.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1DiHMlxQx3QEAYivNMOl3olC8jj9mutCZ
"""
import re

import torch
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForCausalLM
app = FastAPI()
# Allow browser clients on any origin to call this API.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by browsers for credentialed requests — confirm whether credentials
# (cookies/auth headers) are actually needed; if not, drop allow_credentials.
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Hugging Face model ID of the fine-tuned causal LM served by /generate.
model_name = "nellaep/AniSolSenseiModel"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# /generate tokenizes with padding=True; GPT-style causal-LM tokenizers often
# ship without a pad token, and padding then raises at request time. Reuse the
# EOS token as pad (matches pad_token_id=eos_token_id used in generate()).
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()  # inference only: disable dropout / training-mode layers
# Lowercased phrases that short-circuit /generate to the crisis-hotline reply.
# Each phrase with an apostrophe is listed in BOTH curly (’) and straight (')
# forms: typed user input almost always uses the straight form, which the
# original curly-only entries could never match.
sensitive_keywords = [
    "suicide", "kill myself", "end my life", "self harm", "cutting",
    "i want to die", "i want to disappear", "hurt myself",
    "life isn’t worth it", "life isn't worth it",
    "i can’t take it anymore", "i can't take it anymore",
    "no reason to live", "i hate living", "die", "i give up",
]
# Canned crisis response returned verbatim (trailing newline per line) when a
# sensitive keyword is detected in the user's input.
_HOTLINE_LINES = (
    "Your life is valuable twin. If you're feeling overwhelmed, please reach out for help.",
    "Call or text 988 (U.S. Suicide & Crisis Lifeline) for free, 24/7 support.",
)
hotline_message = "".join(line + "\n" for line in _HOTLINE_LINES)
class InputText(BaseModel):
    """Request body for POST /generate: one free-form user message."""

    input: str
@app.post("/generate")
async def generate_response(data: InputText):
    """Generate a Kakashi-persona reply to the user's message.

    If the message contains self-harm related language, skip the model and
    return the crisis-hotline message instead.

    Returns a dict with a single "response" key in both cases.
    """
    # Normalize case and curly apostrophes so typed input ("can't") compares
    # equal to keywords written with either apostrophe form.
    normalized = data.input.lower().replace("\u2019", "'")
    # Word-boundary regex instead of bare substring matching: the original
    # `keyword in user_input` made short keywords like "die" fire on harmless
    # words such as "diet" or "candied".
    if any(
        re.search(r"\b" + re.escape(k.replace("\u2019", "'")) + r"\b", normalized)
        for k in sensitive_keywords
    ):
        return {"response": hotline_message}

    prompt = f"Input: {data.input}\nSensei: Kakashi\nOutput:"
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_new_tokens=100,
            pad_token_id=tokenizer.eos_token_id,
        )
    decoded = tokenizer.decode(output[0], skip_special_tokens=True)
    # The model echoes the prompt; everything after the final "Output:" marker
    # is the generated reply.
    response = decoded.split("Output:")[-1].strip()
    return {"response": response}