import gradio as gr
from huggingface_hub import InferenceClient
from sentence_transformers import SentenceTransformer
import torch
import numpy as np

# Load the plain-text university dataset (UTF-8) that retrieval draws from.
with open("uni_dataset.txt", "r", encoding="utf-8") as file:
    uni_dataset_text = file.read()
print("success")

# Split the dataset into paragraph-level chunks (blank-line separated),
# discarding empty entries.
# FIX: original referenced `uni_dataset`, which was never defined (NameError);
# the variable holding the file contents is `uni_dataset_text`.
chunks = [chunk.strip() for chunk in uni_dataset_text.split("\n\n") if chunk.strip()]

# Embed every chunk once at startup so each query only needs one matrix product.
embedder = SentenceTransformer('all-MiniLM-L6-v2')
chunk_embeddings = embedder.encode(chunks, convert_to_tensor=True)


def get_relevant_context(query, top_k=3):
    """Return the `top_k` chunks most similar to `query` (cosine similarity),
    joined by blank lines.

    Args:
        query: Free-text user query to embed and match against the dataset.
        top_k: Maximum number of chunks to return (clamped to the number
            of available chunks so `torch.topk` cannot raise).

    Returns:
        A single string of the best-matching chunks separated by "\n\n".
    """
    query_embedding = embedder.encode(query, convert_to_tensor=True)
    # Normalize both sides so the dot product equals cosine similarity.
    query_embedding = query_embedding / query_embedding.norm()
    norm_chunk_embeddings = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    similarities = torch.matmul(norm_chunk_embeddings, query_embedding)
    # Clamp k: topk raises if asked for more elements than exist.
    k = min(top_k, len(chunks))
    top_k_indices = torch.topk(similarities, k=k).indices.cpu().numpy()
    return "\n\n".join(chunks[i] for i in top_k_indices)


client = InferenceClient("microsoft/phi-4")


def respond(message, history):
    """Chat handler for gr.ChatInterface in "messages" format.

    Args:
        message: The latest user message (str).
        history: Prior turns as a list of {"role", "content"} dicts
            (Gradio "messages" format), or falsy on the first turn.

    Returns:
        The assistant's reply text, stripped of surrounding whitespace.
    """
    # TODO(review): get_relevant_context() is defined above but never used here —
    # the retrieved dataset context is presumably meant to be injected into the
    # prompt. Wire it in once the intended prompt format is confirmed.
    messages = [{"role": "system", "content": "you are a realistic and friendly career advisor to help secondary school students with important decisions such as the university courses they should apply to, careers to pursue, etc. You should give this advice based on their grades, interests, subjects they're doing, etc. Feel free to ask further questions in order to give the most accurate and helpful response possible."}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(
        messages,
        max_tokens=500
    )
    # FIX: chat_completion returns a ChatCompletionOutput dataclass, so the
    # reply is reached via attribute access, not dict subscripting.
    return response.choices[0].message.content.strip()


# Chatbot UI — conversation history and user input.
chatbot = gr.ChatInterface(respond, type="messages", title="CASSI")
chatbot.launch()