Trigger82 committed on
Commit
a2bac2c
·
verified ·
1 Parent(s): 15ebbc8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -24
app.py CHANGED
@@ -1,32 +1,32 @@
1
  from fastapi import FastAPI
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
- import gradio as gr
5
- import uvicorn
6
- import threading
7
  from tinydb import TinyDB, Query
 
 
 
8
 
9
  # Load model
10
  tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-1_5")
11
  model = AutoModelForCausalLM.from_pretrained("microsoft/phi-1_5")
12
  model.eval()
13
 
14
- # Setup DB
15
  db = TinyDB("memory.json")
16
  User = Query()
17
 
18
- # API
19
- app = FastAPI()
20
-
21
  @app.get("/")
22
  async def chat(query: str = "", number: str = "", prompt: str = ""):
23
  if not query or not number:
24
- return {"response": "Trigger needs both ?query= and &number= 🧠"}
25
 
26
  record = db.get(User.number == number)
27
-
28
  if not record:
29
- record = {"number": number, "history": [], "prompt": "You are Trigger, a clever, emotional AI boy created by 'I am him'. Speak like a witty, chill human friend."}
 
 
 
 
30
  db.insert(record)
31
 
32
  if prompt:
@@ -36,11 +36,11 @@ async def chat(query: str = "", number: str = "", prompt: str = ""):
36
  history = record["history"]
37
  system_prompt = record.get("prompt", "")
38
 
39
- # Add new user input
40
  history.append(f"User: {query}")
41
  trimmed = history[-6:]
42
 
43
- # Build prompt
44
  full_prompt = system_prompt + "\n" + "\n".join(trimmed) + "\nTrigger:"
45
  inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True)
46
 
@@ -57,19 +57,12 @@ async def chat(query: str = "", number: str = "", prompt: str = ""):
57
  decoded = tokenizer.decode(output[0], skip_special_tokens=True)
58
  reply = decoded.split("Trigger:")[-1].strip()
59
 
 
60
  history.append(f"Trigger: {reply}")
61
- db.update({"history": history, "prompt": system_prompt, "number": number}, User.number == number)
62
 
63
  return {"response": reply}
64
 
65
- # Run API
66
- def run_api():
67
- uvicorn.run(app, host="0.0.0.0", port=7860)
68
-
69
- threading.Thread(target=run_api).start()
70
-
71
- # Gradio dummy
72
- def keep_alive():
73
- return "Trigger’s up."
74
-
75
- gr.Interface(fn=keep_alive, inputs=[], outputs="text").launch()
 
1
# Third-party dependencies, grouped and alphabetized.
import torch
import uvicorn
from fastapi import FastAPI
from tinydb import TinyDB, Query
from transformers import AutoTokenizer, AutoModelForCausalLM

# Web application instance all routes are registered on.
app = FastAPI()
8
 
9
# Persistent conversation memory: a JSON-backed TinyDB store keyed by
# phone number (see the `chat` endpoint), plus a reusable query object.
db = TinyDB("memory.json")
User = Query()

# Load tokenizer and model once at import time; eval() disables
# training-mode layers (dropout etc.) for inference.
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-1_5")
model = AutoModelForCausalLM.from_pretrained("microsoft/phi-1_5")
model.eval()
17
 
 
 
 
18
  @app.get("/")
19
  async def chat(query: str = "", number: str = "", prompt: str = ""):
20
  if not query or not number:
21
+ return {"response": "Missing query or number."}
22
 
23
  record = db.get(User.number == number)
 
24
  if not record:
25
+ record = {
26
+ "number": number,
27
+ "history": [],
28
+ "prompt": "You are Trigger, a clever, emotional AI boy created by 'I am him'. Speak like a witty, chill human friend."
29
+ }
30
  db.insert(record)
31
 
32
  if prompt:
 
36
  history = record["history"]
37
  system_prompt = record.get("prompt", "")
38
 
39
+ # Append new input
40
  history.append(f"User: {query}")
41
  trimmed = history[-6:]
42
 
43
+ # Construct final prompt
44
  full_prompt = system_prompt + "\n" + "\n".join(trimmed) + "\nTrigger:"
45
  inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True)
46
 
 
57
  decoded = tokenizer.decode(output[0], skip_special_tokens=True)
58
  reply = decoded.split("Trigger:")[-1].strip()
59
 
60
+ # Save memory
61
  history.append(f"Trigger: {reply}")
62
+ db.update({"number": number, "history": history, "prompt": system_prompt}, User.number == number)
63
 
64
  return {"response": reply}
65
 
66
# Local development entry point; the hosting platform (HF Spaces)
# launches the ASGI app itself, so this only fires when run directly.
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)