import gradio as gr
import random
from huggingface_hub import InferenceClient
# STEP 1 FROM SEMANTIC SEARCH
from sentence_transformers import SentenceTransformer
import torch

# STEP 2 FROM SEMANTIC SEARCH
# Open the quentins_knowledge.txt file in read mode with UTF-8 encoding
with open("quentins_knowledge.txt", "r", encoding="utf-8") as file:
    # Read the entire contents of the file and store it in a variable
    quentins_knowledge = file.read()
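# The knowledge file is assumed to hold one fact per line; preprocess_text
# below turns each non-empty line into a searchable chunk.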
# SECOND FEATURE
with open("quentins_alt_knowledge.txt", "r", encoding="utf-8") as file:
    # Read the entire contents of the file and store it in a variable
    quentins_alt_knowledge = file.read()
# Print the text of both knowledge bases
print(quentins_knowledge)
print(quentins_alt_knowledge)
# STEP 3 FROM SEMANTIC SEARCH
def preprocess_text(text):
    # Strip extra whitespace from the beginning and the end of the text
    cleaned_text = text.strip()
    # Split the cleaned_text by every newline character (\n)
    chunks = cleaned_text.split("\n")
    # Create an empty list to store cleaned chunks
    cleaned_chunks = []
    # Clean each chunk and add it to the cleaned_chunks list, skipping empty lines
    for chunk in chunks:
        stripped_chunk = chunk.strip()
        if len(stripped_chunk) > 0:
            cleaned_chunks.append(stripped_chunk)
    # Print cleaned_chunks
    print(cleaned_chunks)
    # Print the length of cleaned_chunks
    print(len(cleaned_chunks))
    # Return the cleaned_chunks
    return cleaned_chunks
# Call the preprocess_text function and store the result in a cleaned_chunks variable
cleaned_chunks = preprocess_text(quentins_knowledge)
# SECOND FEATURE
cleaned_alt_chunks = preprocess_text(quentins_alt_knowledge)
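# Illustration of preprocess_text on a hypothetical input (not from the real
# knowledge files): each non-empty line becomes one chunk.
#   preprocess_text("  Ducks float.\n\n  Ducks quack.  \n")
#   # -> ["Ducks float.", "Ducks quack."]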
# STEP 4 FROM SEMANTIC SEARCH
# Load the pre-trained embedding model that converts text to vectors
model = SentenceTransformer('all-MiniLM-L6-v2')
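# The model weights are downloaded from the Hugging Face Hub on first use and
# cached locally after that.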
def create_embeddings(text_chunks):
    # Convert each text chunk into a vector embedding and store as a tensor
    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
    # Print the chunk embeddings
    print(chunk_embeddings)
    # Print the shape of chunk_embeddings (no parentheses on .shape: it's a property, not a method)
    print(chunk_embeddings.shape)
    # Return the chunk_embeddings
    return chunk_embeddings
# Call the create_embeddings function and store the result in a new chunk_embeddings variable
chunk_embeddings = create_embeddings(cleaned_chunks)
# SECOND FEATURE
alt_chunk_embeddings = create_embeddings(cleaned_alt_chunks)
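# all-MiniLM-L6-v2 produces 384-dimensional vectors, so the printed shape is
# (number_of_chunks, 384).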
# STEP 5 FROM SEMANTIC SEARCH
# Define a function to find the most relevant text chunks for a given query, chunk_embeddings, and text_chunks
def get_top_chunks(query, chunk_embeddings, text_chunks):
    # Convert the query text into a vector embedding
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Normalize the query embedding to unit length for accurate similarity comparison
    query_embedding_normalized = query_embedding / query_embedding.norm()
    # Normalize all chunk embeddings to unit length for consistent comparison
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    # Calculate cosine similarity between query and all chunks using matrix multiplication
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
    # Print the similarities
    print(similarities)
    # Find the indices of the 3 chunks with the highest similarity scores
    # (k is capped so this still works if there are fewer than 3 chunks)
    top_indices = torch.topk(similarities, k=min(3, len(text_chunks))).indices
    # Print the top indices
    print(top_indices)
    # Create an empty list to store the most relevant chunks
    top_chunks = []
    # Loop through the top indices and retrieve the corresponding text chunks
    for index in top_indices:
        chunk = text_chunks[index]
        top_chunks.append(chunk)
    # Return the list of most relevant chunks
    return top_chunks
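# The normalize-then-matmul above is just cosine similarity; an equivalent
# sketch using the sentence_transformers helper (same scores) would be:
#   from sentence_transformers import util
#   similarities = util.cos_sim(query_embedding, chunk_embeddings)[0]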
client = InferenceClient("google/gemma-3-27b-it")

def respond(message, history, name, mood, topic):
    # mood and topic arrive from the CheckboxGroup components as lists of
    # selected labels (possibly empty), so join them into readable strings
    mood = mood or []
    topic = topic or []
    mood_text = ", ".join(mood)
    topic_text = ", ".join(topic)
    duck_chunks = []
    if "Self Help" in topic:
        duck_chunks = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
        print(duck_chunks)
    elif "Duck Facts" in topic:
        # Pair the alt knowledge base with the embeddings built from it
        duck_chunks = get_top_chunks(message, alt_chunk_embeddings, cleaned_alt_chunks)
        print(duck_chunks)
    duck_info = "\n".join(duck_chunks)
    messages = [{"role": "system", "content": f"You are an extremely {mood_text} chatbot named Quentin. You are a rubber duck, with strong human emotions, who helps the user with their problem related to {topic_text}. You talk to the user, whose name is {name}, in a way that reflects your {mood_text} mood. Make sure to use duck-themed references in your responses. Refer to the user by name as much as possible. Base your response on the provided context: {duck_info}. Always end your response with a brief, punchy tagline."}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})
    response = client.chat_completion(
        messages,
        max_tokens=200,
        temperature=0.35
    )
    print(message)
    print(history)
    return response['choices'][0]['message']['content'].strip()
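# Note: recent huggingface_hub versions also support attribute access on the
# result, e.g. response.choices[0].message.content -- both styles work.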
# Earlier bot iterations, kept for reference:
# def echo(message, history):
#     return message

# def yes_no(message, history):
#     responses = ["Yes", "No"]
#     return random.choice(responses)

# def magic_eight(message, history):
#     responses = ["That's a terrible question. Try again", "I don't think I should answer that...", "What do you think, genius?", "You are a bad person for asking that.", "Absolutely not", "Uuuuh, obviously.", "Of all the things you could ask, you went with that?", "I don't know, look it up", "I mean, yeah, I guess...", "That's gonna be a big nope", ""]
#     return random.choice(responses)
title = "Ask Quentin"
about_text = "Quentin says: 'I'm an expert, not a quack'"
with gr.Blocks(theme=gr.themes.Citrus(
    secondary_hue="red",
    neutral_hue="gray",
    text_size="lg",
).set(
    background_fill_primary='*neutral_200',
    background_fill_secondary='*neutral_400',
    background_fill_secondary_dark='*secondary_500',
    border_color_accent='*secondary_400',
    border_color_accent_dark='*secondary_800',
    color_accent='*secondary_300',
    color_accent_soft='*secondary_500',
    color_accent_soft_dark='*secondary_400',
    button_primary_background_fill='*secondary_500',
    button_primary_background_fill_dark='*secondary_600'
)) as chatbot:
    with gr.Row(scale=1):
        gr.Image("ask_quentin_banner.jpg", show_label=False, show_share_button=False, show_download_button=False)
    with gr.Row(scale=1):
        quentin_topic = gr.CheckboxGroup(["Self Help", "Duck Facts"], label="What do you want help with?")
    with gr.Row(scale=4):
        with gr.Column(scale=1):
            gr.Image("Quentin.png", show_label=False, show_share_button=False, show_download_button=False)
            username = gr.Textbox(placeholder="Type your name here", label="Name")
            quentin_attitude = gr.CheckboxGroup(["Kind", "Angry", "Childish", "Tough Guy"], label="What is Quentin's Mood?")
        with gr.Column(scale=3):
            gr.ChatInterface(fn=respond, type="messages", additional_inputs=[username, quentin_attitude, quentin_topic], title="Quentin, the Helpful Quackbot")

chatbot.launch()
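# To try this locally (assuming this file is saved as app.py and the two .txt
# knowledge files plus both images sit next to it): run `python app.py` and
# open the local URL that Gradio prints.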