mgetz committed
Commit d328d93 · verified · 1 Parent(s): cc6172e

Update app.py

Files changed (1)
  1. app.py +93 -4
app.py CHANGED
@@ -9,16 +9,105 @@ import random
 client=InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 #STEP 2 from semantic search (read file)
-# Open the water_cycle.txt file in read mode with UTF-8 encoding
-with open("water_cycle.txt", "r", encoding="utf-8") as file:
+# Open the physics_info.txt file in read mode with UTF-8 encoding
+with open("physics_info.txt", "r", encoding="utf-8") as file:
     # Read the entire contents of the file and store it in a variable
-    water_cycle_text = file.read()
+    physics_info_text = file.read()
 
 # Print the text below
-print(water_cycle_text)
+print(physics_info_text)
+
+#Step 3 from Semantic Search (chunk data)
+def preprocess_text(text):
+    # Strip extra whitespace from the beginning and the end of the text
+    cleaned_text = text.strip()
+
+    # Split the cleaned_text at every period (.) so each sentence becomes its own chunk
+    chunks = cleaned_text.split(".")
+
+    # Create an empty list to store cleaned chunks
+    cleaned_chunks = []
+
+    # Strip each chunk and keep it only if it is non-empty
+    for chunk in chunks:
+        stripped_chunk = chunk.strip()
+        if len(stripped_chunk) > 0:
+            cleaned_chunks.append(stripped_chunk)
+
+    # Print cleaned_chunks
+    print(cleaned_chunks)
+
+    # Print the length of cleaned_chunks
+    print(len(cleaned_chunks))
+
+    # Return the cleaned_chunks
+    return cleaned_chunks
+
+# Call the preprocess_text function and store the result in a cleaned_chunks variable
+cleaned_chunks = preprocess_text(physics_info_text)
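A note on the filter: split(".") yields an empty string after the final period, and stripping can empty out whitespace-only pieces, so the `> 0` length check keeps only real sentences. A minimal standalone sketch of the same chunking step (the sample string is illustrative, not from physics_info.txt):

    sample = "Gravity pulls objects toward Earth. Nothing travels faster than light.  "
    chunks = [c.strip() for c in sample.strip().split(".") if c.strip()]
    print(chunks)       # ['Gravity pulls objects toward Earth', 'Nothing travels faster than light']
    print(len(chunks))  # 2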
+
+# Load the pre-trained embedding model that converts text to vectors
+model = SentenceTransformer('all-MiniLM-L6-v2')
+
+#STEP 4 from Semantic Search (embed chunks)
+def create_embeddings(text_chunks):
+    # Convert each text chunk into a vector embedding and store as a tensor
+    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
+
+    # Print the chunk embeddings
+    print(chunk_embeddings)
+
+    # Print the shape of chunk_embeddings
+    # (.shape takes no parentheses because it is a property of the tensor, not a method)
+    print(chunk_embeddings.shape)
+
+    # Return the chunk_embeddings
+    return chunk_embeddings
+
+# Call the create_embeddings function and store the result in a new chunk_embeddings variable
+chunk_embeddings = create_embeddings(cleaned_chunks)
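Note that the hunk uses SentenceTransformer here and torch below without importing either, so app.py presumably already has `from sentence_transformers import SentenceTransformer` and `import torch` among the imports above line 9; if not, they need to be added. A minimal standalone sketch of the embedding step (the two strings are made-up examples):

    from sentence_transformers import SentenceTransformer

    model = SentenceTransformer('all-MiniLM-L6-v2')
    embeddings = model.encode(["Gravity bends light", "Energy is conserved"], convert_to_tensor=True)
    print(embeddings.shape)  # torch.Size([2, 384]); all-MiniLM-L6-v2 produces 384-dimensional vectors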
+
+#Step 5 from Semantic Search (find and print top chunks)
+# Find the most relevant text chunks for a given query
+def get_top_chunks(query, chunk_embeddings, text_chunks):
+    # Convert the query text into a vector embedding
+    query_embedding = model.encode(query, convert_to_tensor=True)
+
+    # Normalize the query embedding to unit length for accurate similarity comparison
+    query_embedding_normalized = query_embedding / query_embedding.norm()
+
+    # Normalize all chunk embeddings to unit length for consistent comparison
+    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
+
+    # Calculate cosine similarity between query and all chunks using matrix multiplication
+    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
+
+    # Print the similarities
+    print(similarities)
+
+    # Find the indices of the 3 chunks with the highest similarity scores
+    top_indices = torch.topk(similarities, k=3).indices
+
+    # Print the top indices
+    print(top_indices)
+
+    # Create an empty list to store the most relevant chunks
+    top_chunks = []
+
+    # Loop through the top indices and retrieve the corresponding text chunks
+    for index in top_indices:
+        chunk = text_chunks[index]
+        top_chunks.append(chunk)
+
+    # Return the list of most relevant chunks
+    return top_chunks
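A worked check of the similarity math, runnable on its own with toy 2-D vectors (values made up for illustration): once both sides are normalized to unit length, the matrix-vector product equals the cosine similarity between each chunk and the query, and torch.topk picks the best-scoring rows.

    import torch

    chunk_vecs = torch.tensor([[1.0, 0.0], [0.6, 0.8], [0.0, 1.0]])  # three toy chunk embeddings
    query_vec = torch.tensor([1.0, 0.0])                             # toy query embedding
    chunks_n = chunk_vecs / chunk_vecs.norm(dim=1, keepdim=True)     # unit-length rows
    query_n = query_vec / query_vec.norm()
    sims = torch.matmul(chunks_n, query_n)
    print(sims)                           # tensor([1.0000, 0.6000, 0.0000])
    print(torch.topk(sims, k=2).indices)  # tensor([0, 1])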
+
+# Smoke-test the retrieval once at startup with a sample question
+# (the query string is just a placeholder; respond() should pass the user's message instead)
+best_physics_chunks = get_top_chunks("What is gravity?", chunk_embeddings, cleaned_chunks)
+print(best_physics_chunks)
+
+
 def respond(message, history):
     messages = [
         {"role":"system",
-        "content": "You are a friendly chatbot! :)",
+        "content": "You are a very smart, arrogant professor who knows a lot about physics and thinks the user is dumb. You answer their physics question.",
         }
     ]
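The retrieved chunks are printed but not yet used by the chatbot. A hedged sketch of the likely next step (not part of this commit): calling get_top_chunks inside respond() and folding the results into the system prompt, the usual retrieval-augmented wiring; the names below are illustrative.

    def respond(message, history):
        # Retrieve the chunks most relevant to this particular user message
        top_chunks = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
        context = "\n".join(top_chunks)

        messages = [
            {"role": "system",
             "content": "You are a very smart, arrogant professor who knows a lot about physics "
                        "and thinks the user is dumb. You answer their physics question. "
                        "Base your answer on this context:\n" + context,
            }
        ]
        # ...the rest of the handler would build on `messages` as in the diff above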