mgetz committed
Commit 6c07787 · verified · 1 parent: 49d9c6b

Update app.py

Files changed (1)
  1. app.py +123 -0
app.py CHANGED
@@ -3,6 +3,129 @@ from huggingface_hub import InferenceClient
  import gradio as gr
  import random
 
+ #Semantic Search
+ #STEP 1
+ !pip install -q sentence-transformers
+ from sentence_transformers import SentenceTransformer
+ import torch
+
+ #STEP 2
+ # Open the water_cycle.txt file in read mode with UTF-8 encoding
+ with open("water_cycle.txt", "r", encoding="utf-8") as file:
+     # Read the entire contents of the file and store it in a variable
+     water_cycle_text = file.read()
+
+ # Print the text
+ print(water_cycle_text)
+
+ #STEP 3
+ def preprocess_text(text):
+     # Strip extra whitespace from the beginning and the end of the text
+     cleaned_text = text.strip()
+
+     # Split the cleaned_text at every newline character (\n)
+     chunks = cleaned_text.split("\n")
+
+     # Create an empty list to store cleaned chunks
+     cleaned_chunks = []
+
+     # Clean each chunk and keep only the non-empty ones
+     for chunk in chunks:
+         stripped_chunk = chunk.strip()
+         if len(stripped_chunk) > 0:
+             cleaned_chunks.append(stripped_chunk)
+
+     # Print cleaned_chunks
+     print(cleaned_chunks)
+
+     # Print the length of cleaned_chunks
+     print(len(cleaned_chunks))
+
+     # Return the cleaned_chunks
+     return cleaned_chunks
+
+ # Call the preprocess_text function and store the result in a cleaned_chunks variable
+ cleaned_chunks = preprocess_text(water_cycle_text)
+
+ #STEP 4
+ # Load the pre-trained embedding model that converts text to vectors
+ model = SentenceTransformer('all-MiniLM-L6-v2')
+
+ def create_embeddings(text_chunks):
+     # Convert each text chunk into a vector embedding and store the result as a tensor
+     chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
+
+     # Print the chunk embeddings
+     print(chunk_embeddings)
+
+     # Print the shape of chunk_embeddings
+     print(chunk_embeddings.shape)  # no parentheses on .shape because it's a property, not a method; look up the difference between class methods and class properties
+
+     # Return the chunk_embeddings
+     return chunk_embeddings
+
+ # Call the create_embeddings function and store the result in a new chunk_embeddings variable
+ chunk_embeddings = create_embeddings(cleaned_chunks)
+
+ #STEP 5
+ # Define a function that finds the most relevant text chunks for a given query
+ def get_top_chunks(query, chunk_embeddings, text_chunks):
+     # Convert the query text into a vector embedding
+     query_embedding = model.encode(query, convert_to_tensor=True)
+
+     # Normalize the query embedding to unit length for accurate similarity comparison
+     query_embedding_normalized = query_embedding / query_embedding.norm()
+
+     # Normalize all chunk embeddings to unit length for consistent comparison
+     chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
+
+     # Calculate cosine similarity between the query and all chunks using matrix multiplication
+     similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
+
+     # Print the similarities
+     print(similarities)
+
+     # Find the indices of the 3 chunks with the highest similarity scores
+     top_indices = torch.topk(similarities, k=3).indices
+
+     # Print the top indices
+     print(top_indices)
+
+     # Create an empty list to store the most relevant chunks
+     top_chunks = []
+
+     # Loop through the top indices and retrieve the corresponding text chunks
+     for index in top_indices:
+         chunk = text_chunks[index]
+         top_chunks.append(chunk)
+
+     # Return the list of most relevant chunks
+     return top_chunks
+
+ #STEP 6 or Practice
+ # ===== LOAD & PROCESS YOUR NEW CONTENT =====
+ with open("em_spectrum.txt", "r", encoding="utf-8") as file:
+     # Read the entire contents of the file and store it in a variable
+     em_spectrum_text = file.read()
+
+ # Print the text
+ print(em_spectrum_text)
+
+ # ===== APPLY THE COMPLETE WORKFLOW =====
+ # need cleaned_chunks and chunk_embeddings variables for the new content
+ em_cleaned_chunks = preprocess_text(em_spectrum_text)
+ em_chunk_embeddings = create_embeddings(em_cleaned_chunks)
+
+ test_question = "What type of EM radiation has the most energy?"
+
+ print("test question:", test_question)
+ em_top_results = get_top_chunks(test_question, em_chunk_embeddings, em_cleaned_chunks)
+ print(em_top_results)
+
+
+ # ===== EXPERIMENT & VERIFY =====
+
  client=InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
  def respond(message, history):
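The new helpers build embeddings and retrieve the most similar chunks, but the commit stops before connecting them to the Zephyr chat handler that follows. A minimal sketch of how the pieces could be wired together is shown below; it is an illustration only, and the system-prompt wording, the max_tokens value, and the use of chat_completion are assumptions rather than part of this commit.

# Hypothetical sketch (not part of the commit): retrieval-augmented respond()
def respond(message, history):
    # Retrieve the three chunks most similar to the user's message
    # (assumes cleaned_chunks / chunk_embeddings were built above from water_cycle.txt)
    top_chunks = get_top_chunks(message, chunk_embeddings, cleaned_chunks)

    # Ground the model by passing the retrieved chunks as context
    messages = [
        {"role": "system",
         "content": "Answer using only this context:\n" + "\n".join(top_chunks)},
        {"role": "user", "content": message},
    ]

    # Ask the Zephyr endpoint for a reply (parameters are illustrative)
    result = client.chat_completion(messages, max_tokens=300)
    return result.choices[0].message.content

Exposing this in the app would then be a one-liner such as gr.ChatInterface(respond).launch(), which matches the gradio import already present in the file.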