consy committed on
Commit
da90f7f
·
verified ·
1 Parent(s): 458ea50

Added Colab Stuff

Browse files
Files changed (1) hide show
  1. app.py +106 -1
app.py CHANGED
@@ -1,8 +1,113 @@
1
  import gradio as gr
2
  import random
 
 
 
 
3
  #import lines go at the top!
4
 
5
- from huggingface_hub import InferenceClient
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
  client = InferenceClient("microsoft/phi-4")
8
  # name of llm chatbot accessed ^^ or can use ' microsoft/phi-4 that's connected to the microsoft phi gen model
 
1
  import gradio as gr
2
  import random
3
+ from huggingface_hub import InferenceClient
4
+ # SEMANTIC SEARCH STEP 1
5
+ from sentence_transformers import SentenceTransformer
6
+ import torch
7
  #import lines go at the top!
8
 
9
# SEMANTIC SEARCH STEP 2 --> EDIT WITH YOUR OWN KNOWLEDGE BASE WHEN READY
# Load the knowledge base from disk; the whole file becomes one string.
with open("water_cycle.txt", "r", encoding="utf-8") as file:
    water_cycle_text = file.read()

# Debug output: show exactly what was loaded
print(water_cycle_text)
14
+
15
# SEMANTIC SEARCH STEP 3
def preprocess_text(text):
    """Split the knowledge-base text into clean, non-empty line chunks.

    Args:
        text: Raw knowledge-base text (e.g. the contents of water_cycle.txt).

    Returns:
        list[str]: One stripped, non-empty chunk per line of the input,
        in original order.
    """
    # Strip extra whitespace from the beginning and the end of the text
    cleaned_text = text.strip()

    # One chunk per line. BUG FIX: drop lines that are empty after
    # stripping — embedding empty strings would pollute the similarity
    # search and could be returned as "top" chunks.
    cleaned_chunks = [
        chunk.strip() for chunk in cleaned_text.split("\n") if chunk.strip()
    ]

    # Debug output: the chunks and how many survived
    print(cleaned_chunks)
    print(len(cleaned_chunks))

    return cleaned_chunks
41
# Build the chunk list once at start-up from the knowledge-base text
cleaned_chunks = preprocess_text(water_cycle_text)

# SEMANTIC SEARCH STEP 4
# Load the pre-trained embedding model that converts text to vectors
model = SentenceTransformer('all-MiniLM-L6-v2')
48
+
49
def create_embeddings(text_chunks):
    """Encode every text chunk into a dense vector embedding.

    Args:
        text_chunks: List of knowledge-base strings to embed.

    Returns:
        A 2-D tensor with one embedding row per input chunk.
    """
    # convert_to_tensor=True yields a torch tensor we can matmul later
    embeddings = model.encode(text_chunks, convert_to_tensor=True)

    # Debug output: the raw embeddings, then their (num_chunks, dim) shape
    print(embeddings)
    print(embeddings.shape)

    return embeddings
63
# Embed the whole knowledge base once so every query can be compared to it
chunk_embeddings = create_embeddings(cleaned_chunks)
66
+
67
# SEMANTIC SEARCH STEP 5
def get_top_chunks(query, chunk_embeddings, text_chunks):
    """Return the text chunks most semantically similar to the query.

    Args:
        query: Free-text question to search with.
        chunk_embeddings: 2-D tensor of embeddings, one row per chunk.
        text_chunks: The chunk strings, in the same order as the embeddings.

    Returns:
        list[str]: Up to 3 chunks, ordered from most to least similar.
    """
    # Convert the query text into a vector embedding
    query_embedding = model.encode(query, convert_to_tensor=True)

    # Normalize the query and chunk embeddings to unit length so the
    # dot product below is exactly cosine similarity
    query_embedding_normalized = query_embedding / query_embedding.norm()
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)

    # Cosine similarity of the query against every chunk at once
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
    print(similarities)

    # BUG FIX: cap k at the number of chunks — torch.topk raises a
    # RuntimeError if k exceeds the length of the similarity vector.
    top_indices = torch.topk(similarities, k=min(3, len(text_chunks))).indices
    print(top_indices)

    # BUG FIX: read from the text_chunks parameter, not the module-global
    # cleaned_chunks, so the function works for any chunk list passed in.
    return [text_chunks[i] for i in top_indices]
102
# SEMANTIC SEARCH STEP 6
# Run one retrieval with a sample query and show the matching chunks
top_results = get_top_chunks('Is water good?', chunk_embeddings, cleaned_chunks)
print(top_results)
110
# Hugging Face InferenceClient connected to the hosted chat LLM;
# "microsoft/phi-4" is the model id of the Phi generation model used below.
client = InferenceClient("microsoft/phi-4")