| from transformers import AutoModelForCausalLM, AutoTokenizer | |
| import torch | |
| import gradio as gr | |
# Hugging Face Hub id of the base model; phi-1_5 is small enough for CPU inference.
model_id = "microsoft/phi-1_5"
# Download (or load from local cache) the tokenizer and weights.
# float32 keeps the model runnable on CPU-only hosts (no half-precision kernels needed).
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float32)
def chat(message):
    """Generate a persona-styled reply to *message* with the loaded causal LM.

    Builds a fixed persona prompt around the user's message, samples up to
    200 new tokens, and returns only the model's continuation (the prompt is
    never echoed back).

    Args:
        message: The user's chat message (plain text).

    Returns:
        The generated reply as a stripped string.
    """
    prompt = (
        "You are π΄ ππ πππ β a fun, smooth, emotionally intelligent, and clever AI created by π΄ ππ πππ. "
        "You speak like a real person, not a robot. You donβt act like a therapist or a teacher. "
        "You reply like a calm, confident, warm friend who gets the vibe.\n\n"
        "Your responses should sound like a chill human β sometimes witty, sometimes deep, always grounded. "
        "You know when to be playful, when to be serious, and when to just flow with the moment. "
        "Keep your tone friendly, charming, and emotionally tuned. Never repeat the user's question unless it adds to the vibe.\n\n"
        "Avoid saying \"as an AI\" or sounding fake. Be real. Be humanlike. Be π΄ ππ πππ.\n\n"
        f"Now respond naturally to this message: {message}"
    )
    inputs = tokenizer(prompt, return_tensors="pt")
    # Inference only — disable autograd to avoid holding gradient state.
    with torch.no_grad():
        outputs = model.generate(
            **inputs, max_new_tokens=200, do_sample=True, temperature=0.7
        )
    # generate() returns prompt + continuation; slice off the prompt tokens so
    # the reply never echoes the user's message. (The old marker-split kept the
    # message, because it appears *after* the marker inside the prompt.)
    prompt_len = inputs["input_ids"].shape[1]
    new_tokens = outputs[0][prompt_len:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
# Wire the chat function into a minimal Gradio text-in / text-out UI
# and start the local web server.
demo = gr.Interface(
    fn=chat,
    inputs="text",
    outputs="text",
    title="π΄ ππ πππ AI Chat",
)
demo.launch()