import gradio as gr
import torch
from huggingface_hub import InferenceClient
# Install sentence-transformers before running if not already installed:
# pip install -q sentence-transformers
from sentence_transformers import SentenceTransformer

# Replace with actual token loading
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")
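# One way to do the token loading above (assumes an HF_TOKEN environment
# variable or Space secret; adjust to your setup):
# import os
# client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2", token=os.environ["HF_TOKEN"])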

# Load knowledge base
with open("knowledge.txt", "r", encoding="utf-8") as file:
    exp_know_text = file.read()
print(exp_know_text)

cleaned_text = exp_know_text.strip()

# Chunk on newlines; splitting on "." (by sentence) or " " (by word) are other options.
chunks = cleaned_text.split("\n")

# Drop whitespace-only chunks and trim the rest
cleaned_chunks = [chunk.strip() for chunk in chunks if chunk.strip()]
print(cleaned_chunks)
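# A sentence-level alternative (a rough sketch; naive "." splitting
# mishandles abbreviations and decimals):
# cleaned_chunks = [s.strip() for s in cleaned_text.split(".") if s.strip()]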

# Embed every chunk once, up front
model = SentenceTransformer('all-MiniLM-L6-v2')
chunk_embeddings = model.encode(cleaned_chunks, convert_to_tensor=True)
print(chunk_embeddings)
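# With all-MiniLM-L6-v2, chunk_embeddings is a (num_chunks, 384) tensor:
# one 384-dimensional vector per chunk.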

# Similarity search: return the chunks closest to the query
def get_top_chunks(query):
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Normalize both sides so the dot product below is cosine similarity
    query_embedding_normalized = query_embedding / query_embedding.norm()
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
    # k=min(...) guards against a knowledge base with fewer than 3 chunks
    top_indices = torch.topk(similarities, k=min(3, len(cleaned_chunks))).indices.tolist()
    print(top_indices)
    return [cleaned_chunks[i] for i in top_indices]
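# Quick sanity check (hypothetical query; adapt to your knowledge.txt):
# print(get_top_chunks("What is confirmation bias?"))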

# Chatbot response: retrieved context is prepended to the user's message
def respond(message, history):
    messages = [{"role": "system", "content": "You are a friendly chatbot. You help people understand cognitive biases using simple language."}]
    # With the default tuple-style Gradio history, each turn is a (user, assistant) pair
    if history:
        for human, ai in history:
            messages.append({"role": "user", "content": human})
            messages.append({"role": "assistant", "content": ai})
    # Add top knowledge chunks as context for the current message
    top_chunks = get_top_chunks(message)
    context = "\n".join(top_chunks)
    messages.append({"role": "user", "content": f"{context}\n{message}"})
    response = client.chat_completion(
        messages=messages,
        max_tokens=200,
        temperature=0.2,
    )
    return response.choices[0].message.content.strip()
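# Local smoke test of the full pipeline (hypothetical message, empty history):
# print(respond("Why do first impressions stick?", []))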

# Launch UI
chatbot = gr.ChatInterface(
    fn=respond,
    chatbot=gr.Chatbot(),
    title="Let's Chat about Cognitive Biases!",
    description=(
        "Do you ever wonder how people use shortcuts to make decisions, and how "
        "those shortcuts can bias our decision-making? This chatbot will engage "
        "you in learning about the different decision biases."
    ),
    theme="default",
)
chatbot.launch()