"""Minimal RAG chat demo: retrieve passages with FAISS, answer with a tiny GPT-2.

Module import loads the tokenizer, the fine-tuned causal LM, the sentence
embedder, the FAISS index, and the pickled corpus texts once, up front.
"""

import pickle

import faiss
import gradio as gr
from sentence_transformers import SentenceTransformer
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "tiny-gpt2-finetuned-ajem"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
embedder = SentenceTransformer("all-MiniLM-L6-v2")

index = faiss.read_index("rag_index.faiss")
# NOTE(security): pickle.load executes arbitrary code on load — only ever
# point this at a trusted, locally produced file.
with open("rag_texts.pkl", "rb") as f:
    texts = pickle.load(f)


def get_context(query, top_k=3):
    """Return the top_k most similar corpus passages, joined by newlines.

    FAISS pads missing neighbors with index -1; those are skipped so we
    never silently return texts[-1] (the last passage) for an empty hit.
    """
    q_emb = embedder.encode([query])
    _, ids = index.search(q_emb, top_k)
    return "\n".join(texts[i] for i in ids[0] if i != -1)


def chat(query, history=None):
    """Generate an assistant reply for *query*, grounded in retrieved context.

    gr.ChatInterface calls its fn as fn(message, history); the original
    one-argument signature raised a TypeError on the first message, so
    *history* is accepted (and intentionally unused) here.
    """
    context = get_context(query)
    prompt = context + "\nUsuario: " + query + "\nAsistente:"
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=100,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the freshly generated tokens. The previous
    # decode(...).replace(prompt, "") was fragile: detokenization rarely
    # round-trips the prompt byte-exactly, so the prompt leaked into replies.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()


if __name__ == "__main__":
    gr.ChatInterface(chat, title="Tiny Chat RAG").launch()