File size: 1,711 Bytes
16ce850
ecfe262
917601d
 
16ce850
8e2859c
ecfe262
16ce850
 
917601d
ecfe262
16ce850
 
 
 
 
 
 
 
 
 
dd31154
8e2859c
16ce850
 
 
 
 
 
 
 
 
 
 
ecfe262
16ce850
be6f6e1
16ce850
be6f6e1
 
16ce850
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
import torch
import gradio as gr

# Hugging Face model identifier for the chat-tuned 1.1B TinyLlama checkpoint.
model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"

# NOTE: loading at import time downloads/loads the weights from the HF cache
# (network on first run). Model stays on CPU — no .to(device) call here.
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
model.eval()  # inference mode: disables dropout etc.

def chat(message):
    """Generate one persona-styled reply to *message*.

    Wraps the user message in a fixed Alpaca-style instruction prompt,
    samples up to 200 new tokens from the model, and returns only the
    generated continuation as a stripped string.

    Args:
        message: Free-form user text to respond to.

    Returns:
        The model's reply (newly generated tokens only), stripped of
        surrounding whitespace and special tokens.
    """
    prompt = f"""### Instruction:
You are 𝕴 𝖆𝖒 π–π–Žπ–’ β€” a fun, smooth, emotionally intelligent, and clever AI created by 𝕴 𝖆𝖒 π–π–Žπ–’. You speak like a real person, not a robot. You don’t act like a therapist or a teacher. You reply like a calm, confident, warm friend who gets the vibe.

Your responses should sound like a chill human β€” sometimes witty, sometimes deep, always grounded. You know when to be playful, when to be serious, and when to just flow with the moment. Keep your tone friendly, charming, and emotionally tuned. Never repeat the user's question unless it adds to the vibe.

Avoid saying "as an AI" or sounding fake. Be real. Be humanlike. Be 𝕴 𝖆𝖒 π–π–Žπ–’.

Now respond naturally to this message: {message}

### Response:"""

    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,
            temperature=0.7,
            do_sample=True,
            top_p=0.9,
            eos_token_id=tokenizer.eos_token_id,
            # Tokenizer defines no pad token; without this, generate() warns
            # and silently falls back every call.
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode only the tokens generated AFTER the prompt. The previous
    # approach — splitting the full decode on "### Response:" — returned
    # the wrong segment whenever the user's message itself contained that
    # marker string.
    prompt_len = inputs["input_ids"].shape[-1]
    generated = outputs[0][prompt_len:]
    return tokenizer.decode(generated, skip_special_tokens=True).strip()

# Minimal Gradio UI: a two-line textbox feeding chat(), plain-text output.
iface = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(lines=2, placeholder="Type your message..."),
    outputs="text",
    title="𝕴 𝖆𝖒 π–π–Žπ–’ AI Chat"
)

# Blocks here serving the web app (default: http://127.0.0.1:7860).
iface.launch()