irmakakin commited on
Commit
8778419
·
verified ·
1 Parent(s): d1ff2ae

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +86 -3
app.py CHANGED
@@ -1,11 +1,20 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
-
 
 
4
  client = InferenceClient("Qwen/Qwen2.5-7B-Instruct-1M")
5
 
 
 
 
 
 
6
  def respond(message, history):
7
-
8
- messages = [{"role": "system", "content": "You are an annoying chatbot who forces me to study"}]
 
 
9
 
10
  if history:
11
  messages.extend(history)
@@ -22,4 +31,78 @@ def respond(message, history):
22
 
23
  chatbot = gr.ChatInterface(respond, type="messages")
24
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  chatbot.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ from sentence_transformers import SentenceTransformer
4
+ import torch
5
+ import numpy as np
6
  client = InferenceClient("Qwen/Qwen2.5-7B-Instruct-1M")
7
 
8
+ # Open the ECOsphere.txt file in read mode with UTF-8 encoding
9
+ with open("ECOsphere.txt", "r", encoding="utf-8") as file:
10
+ # Read the entire contents of the file and store it in a variable
11
+ ECOsphere_text = file.read()
12
+
13
  def respond(message, history):
14
+ top_results = get_top_chunks( message , chunk_embeddings, cleaned_chunks) # Complete this line
15
+ # Print the top results
16
+ print(top_results)
17
+ messages = [{"role": "system", "content": "You are a chatbot that encourage people to live more sustainably. Base your response on the following action {top_results}"}]
18
 
19
  if history:
20
  messages.extend(history)
 
31
 
32
  chatbot = gr.ChatInterface(respond, type="messages")
33
 
34
def preprocess_text(text):
    """Split raw document text into a list of clean, non-empty chunks.

    The text is stripped of surrounding whitespace and split on newline
    characters; each resulting chunk is stripped as well.

    Args:
        text: The raw document text (e.g. the contents of ECOsphere.txt).

    Returns:
        list[str]: One cleaned chunk per non-blank line of the input.
    """
    # Strip extra whitespace from the beginning and the end of the text
    cleaned_text = text.strip()

    # Split the cleaned_text by every newline character (\n)
    chunks = cleaned_text.split("\n")

    cleaned_chunks = []
    for chunk in chunks:
        stripped_chunk = chunk.strip()
        # BUG FIX: skip blank lines — the original kept empty strings,
        # which would be embedded and could be retrieved as "top chunks".
        if stripped_chunk:
            cleaned_chunks.append(stripped_chunk)

    # Debug output kept from the tutorial: show the chunks and how many there are
    print(cleaned_chunks)
    print(len(cleaned_chunks))

    return cleaned_chunks
58
+
59
+ # Load the pre-trained embedding model that converts text to vectors
60
+ model = SentenceTransformer('all-MiniLM-L6-v2')
61
+
62
def create_embeddings(text_chunks):
    """Encode every text chunk into a dense vector using the module-level model.

    Args:
        text_chunks: List of strings to embed.

    Returns:
        A 2-D tensor of shape (num_chunks, embedding_dim).
    """
    # Encode all chunks in one batch; convert_to_tensor keeps the result on-device
    embeddings = model.encode(text_chunks, convert_to_tensor=True)

    # Debug output: the raw embeddings and their (rows, dims) shape
    print(embeddings)
    print(embeddings.shape)

    return embeddings
74
+
75
+ # Define a function to find the most relevant text chunks for a given query, chunk_embeddings, and text_chunks
76
def get_top_chunks(query, chunk_embeddings, text_chunks):
    """Return the text chunks most similar to *query* by cosine similarity.

    Args:
        query: The user's question / message text.
        chunk_embeddings: 2-D tensor of chunk vectors, one row per chunk
            (must align index-for-index with text_chunks).
        text_chunks: The original chunk strings.

    Returns:
        list[str]: Up to 3 chunks, most similar first.
    """
    # Convert the query text into a vector embedding
    query_embedding = model.encode(query, convert_to_tensor=True)

    # Normalize both sides to unit length so the dot product below
    # equals cosine similarity.
    query_embedding_normalized = query_embedding / query_embedding.norm()
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)

    # Cosine similarity between the query and every chunk
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
    print(similarities)

    # BUG FIX: never request more results than there are chunks
    # (torch.topk raises if k exceeds the number of elements).
    k = min(3, len(text_chunks))
    top_indices = torch.topk(similarities, k=k).indices
    print(top_indices)

    # BUG FIX: the original loop appended the undefined name `cleaned_chunks`
    # (a NameError at runtime) instead of indexing text_chunks, and the
    # function never returned its result.
    top_chunks = [text_chunks[int(index)] for index in top_indices]
    return top_chunks
105
+
106
+
107
+
108
  chatbot.launch()