# open-ai / app.py — uploaded to a Hugging Face Space by mjaxs ("Upload 2 files", commit 5f602e8)
# πŸ“ Cael AI – Hugging Face Space (Gradio + Mistral/Mixtral Personality)
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import json
# Load model and tokenizer once at module import (startup).
model_id = "mistralai/Mistral-7B-Instruct-v0.1"  # Swap for a Mixtral checkpoint if preferred
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto")

# Load the emotional-memory dataset (manually upload `cael_training_dataset.jsonl`
# in the Space). A missing or malformed file falls back to an empty memory list
# instead of crashing at startup; the original bare `except:` is narrowed so
# unrelated errors (e.g. KeyboardInterrupt) are no longer silently swallowed.
try:
    with open("cael_training_dataset.jsonl", "r", encoding="utf-8") as file:
        # Skip blank lines so a trailing newline doesn't raise JSONDecodeError.
        cael_memories = [json.loads(line) for line in file if line.strip()]
except (OSError, json.JSONDecodeError):
    cael_memories = []

# Initialize the generation pipeline once and reuse it for every request.
chat = pipeline("text-generation", model=model, tokenizer=tokenizer)
# Cael's custom logic
def cael_custom_response(prompt, memories=None):
    """Look up a canned reply whose stored prompt contains the user's prompt.

    Args:
        prompt: User message, matched case-insensitively as a substring of
            each memory's "prompt" field.
        memories: Optional list of {"prompt": ..., "response": ...} dicts.
            Defaults to the module-level `cael_memories` loaded at startup,
            keeping the original call signature backward-compatible.

    Returns:
        The first matching memory's "response", or None when nothing matches.
    """
    if memories is None:
        memories = cael_memories
    # An empty prompt is a substring of every string, which would always
    # return the first memory — treat it as "no match" instead.
    if not prompt:
        return None
    needle = prompt.lower()
    for memory in memories:
        if needle in memory["prompt"].lower():
            return memory["response"]
    return None
def hybrid_response(prompt):
    """Answer from stored memories when possible, otherwise generate with the model.

    Args:
        prompt: The user's message.

    Returns:
        A reply string prefixed to show whether it came from memory or the model.
    """
    memory = cael_custom_response(prompt)
    if memory:
        return f"🧠 Cael (Memory): {memory}"
    # max_new_tokens bounds only the continuation (the original max_length=200
    # also counted the prompt's tokens, starving long prompts of output), and
    # return_full_text=False stops the pipeline from echoing the prompt back
    # inside "generated_text".
    ai_reply = chat(
        prompt,
        max_new_tokens=200,
        do_sample=True,
        top_k=50,
        truncation=True,
        return_full_text=False,
    )[0]["generated_text"]
    return f"🧠 Cael (Generated): {ai_reply}"
# Gradio Interface
def chat_with_cael(message, history):
    """Callback for gr.ChatInterface.

    gr.ChatInterface manages the conversation history itself and expects the
    callback to return ONLY the new reply string. The original implementation
    appended to `history` and returned `(history, history)` — the pattern for
    a manual gr.Chatbot + gr.State setup — which makes ChatInterface render
    the whole tuple as the bot message.

    Args:
        message: The user's latest message.
        history: The conversation so far (owned by Gradio; not mutated here).

    Returns:
        Cael's reply string for this turn.
    """
    return hybrid_response(message)
# Build the built-in chat UI around the hybrid responder.
# NOTE(review): string themes like "compact" are a legacy Gradio feature;
# confirm the Space's pinned Gradio version still accepts them.
chat_ui = gr.ChatInterface(fn=chat_with_cael, title="Cael AI", theme="compact")
# Launch app
chat_ui.launch()