Trigger82 committed on
Commit
926fadf
·
verified ·
1 Parent(s): 4cd266c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -42
app.py CHANGED
@@ -1,45 +1,35 @@
1
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_id = "microsoft/phi-2"

# Load tokenizer and model once at startup from the Hugging Face Hub.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
model.eval()  # fix: inference only — disable dropout / training-mode layers

# Rolling transcript, one "User: ..." / "Trigger: ..." line per turn.
# NOTE(review): module-level state is shared by every visitor of the app
# and is not thread-safe — fine for a single-user demo, confirm otherwise.
chat_history = []

# Fix: the history previously grew without bound, so the prompt would
# eventually exceed the model's context window and slow every request.
MAX_HISTORY_LINES = 20

SYSTEM_PROMPT = (
    "You are Trigger. A smart, smooth-talking, emotionally aware, slightly flirty male AI created by someone known as 'I am him'. "
    "You speak with charisma, confidence, and clever wit. You always try to sound human, avoid robotic replies, and you're the type "
    "of AI that turns heads. Keep your replies casual, expressive, and full of vibe. You’re here to talk, tease, joke, and help — with style.\n\n"
)

def chat(user_input):
    """Generate Trigger's next reply given *user_input*.

    Appends the user turn and the generated reply to the shared
    ``chat_history`` transcript and returns the reply text.
    """
    global chat_history

    chat_history.append(f"User: {user_input}")
    # Keep only the most recent turns so the prompt stays bounded.
    chat_history = chat_history[-MAX_HISTORY_LINES:]

    full_prompt = SYSTEM_PROMPT + "\n".join(chat_history) + "\nTrigger:"

    input_ids = tokenizer(full_prompt, return_tensors="pt").input_ids

    # Fix: generation needs no gradients — no_grad avoids building the
    # autograd graph (large memory/time saving on every request).
    with torch.no_grad():
        output = model.generate(
            input_ids,
            max_new_tokens=150,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,  # phi-2 has no pad token
        )

    # The decode echoes the entire prompt; keep only the newest completion
    # (the text after the final "Trigger:" marker we appended above).
    response = tokenizer.decode(output[0], skip_special_tokens=True)

    if "Trigger:" in response:
        response = response.split("Trigger:")[-1].strip()

    # Fix: the model often continues the dialogue with a hallucinated
    # "User:" turn — cut the reply off at the first such continuation.
    response = response.split("User:")[0].strip()

    chat_history.append(f"Trigger: {response}")
    return response

demo = gr.Interface(fn=chat, inputs="text", outputs="text", title="Chat with Trigger 🧠🔥")
demo.launch()
 
1
from fastapi import FastAPI, Request
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

app = FastAPI()

# Load tokenizer and model once at startup from the Hugging Face Hub.
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-1_5")
model = AutoModelForCausalLM.from_pretrained("microsoft/phi-1_5")
model.eval()  # inference only — disables dropout / training-mode layers

# Custom system prompt for identity and style
system_prompt = "You are Trigger, a chill, clever AI boy created by 'I am him'. You respond with warmth, wit, and a touch of swagger."

@app.get("/")
async def root(query: str = ""):
    """Generate a single-turn reply to *query* in Trigger's persona.

    Returns ``{"response": <text>}``; an empty query short-circuits to a
    canned prompt instead of invoking the model.
    """
    if not query:
        return {"response": "Say something!"}

    full_prompt = f"{system_prompt}\nUser: {query}\nTrigger:"
    inputs = tokenizer(full_prompt, return_tensors="pt")

    # Generation needs no gradients — no_grad avoids building the autograd
    # graph on every request.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=100,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id  # phi models have no pad token
        )

    # The decode echoes the full prompt; keep only the text after the
    # "Trigger:" marker we appended, i.e. the new completion.
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    response = response.split("Trigger:")[-1].strip()
    # Fix: the model often keeps writing the dialogue with a hallucinated
    # "User:" turn — truncate the reply at the first such continuation.
    response = response.split("User:")[0].strip()
    return {"response": response}