Ambereen committed
Commit f7c231c · verified · 1 parent: e88eed5

Update app.py

Files changed (1):
app.py +97 -0
app.py CHANGED
@@ -1,6 +1,103 @@
 
 import gradio as gr
 from huggingface_hub import InferenceClient
+
+# STEP 1 FROM SEMANTIC SEARCH
+# Requires: pip install -q sentence-transformers (run in a terminal; "!pip" is not valid in a .py script)
+from sentence_transformers import SentenceTransformer
+import torch
+
+# STEP 2 FROM SEMANTIC SEARCH
+# Open the water_cycle.txt file in read mode with UTF-8 encoding
+with open("water_cycle.txt", "r", encoding="utf-8") as file:
+    # Read the entire contents of the file and store it in a variable
+    water_cycle_text = file.read()
+
+# Print the text
+print(water_cycle_text)
+
+# STEP 3 FROM SEMANTIC SEARCH
+def preprocess_text(text):
+    # Strip extra whitespace from the beginning and the end of the text
+    cleaned_text = text.strip()
+    # Split the cleaned text at every newline character (\n)
+    chunks = cleaned_text.split("\n")
+    # Create an empty list to store cleaned chunks
+    cleaned_chunks = []
+    # Strip each chunk and keep only the non-empty ones
+    for chunk in chunks:
+        stripped_chunk = chunk.strip()
+        if stripped_chunk != "":
+            cleaned_chunks.append(stripped_chunk)
+    # Print cleaned_chunks
+    print(cleaned_chunks)
+    # Print the length of cleaned_chunks
+    print(len(cleaned_chunks))
+    # Return the cleaned chunks
+    return cleaned_chunks
+
+# Call the preprocess_text function and store the result in a cleaned_chunks variable
+cleaned_chunks = preprocess_text(water_cycle_text)
+
+# STEP 4 FROM SEMANTIC SEARCH
+# Load the pre-trained embedding model that converts text to vectors
+model = SentenceTransformer('all-MiniLM-L6-v2')
+
+def create_embeddings(text_chunks):
+    # Convert each text chunk into a vector embedding, stored as a tensor
+    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
+
+    # Print the chunk embeddings
+    print(chunk_embeddings)
+
+    # Print the shape of chunk_embeddings
+    print(chunk_embeddings.shape)
+
+    # Return the chunk embeddings
+    return chunk_embeddings
+
+# Call the create_embeddings function and store the result in a chunk_embeddings variable
+chunk_embeddings = create_embeddings(cleaned_chunks)
+
+# STEP 5 IN SEMANTIC SEARCH
+def get_top_chunks(query, chunk_embeddings, text_chunks):
+    # Convert the query text into a vector embedding (as a tensor, so .norm() works)
+    query_embedding = model.encode(query, convert_to_tensor=True)
+
+    # Normalize the query embedding to unit length for accurate similarity comparison
+    query_embedding_normalized = query_embedding / query_embedding.norm()
+
+    # Normalize all chunk embeddings to unit length for consistent comparison
+    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
+
+    # Calculate cosine similarity between the query and all chunks using matrix multiplication
+    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
+
+    # Print the similarities
+    print(similarities)
+
+    # Find the indices of the 3 chunks with the highest similarity scores
+    top_indices = torch.topk(similarities, k=3).indices
+
+    # Print the top indices
+    print(top_indices)
+
+    # Create an empty list to store the most relevant chunks
+    top_chunks = []
+
+    # Loop through the top indices and retrieve the corresponding text chunks
+    for i in top_indices:
+        top_chunks.append(text_chunks[i])
+
+    # Return the list of most relevant chunks
+    return top_chunks
+
+# STEP 6 IN SEMANTIC SEARCH
+# Call the get_top_chunks function with the original query
+top_results = get_top_chunks('How does water get into the sky?', chunk_embeddings, cleaned_chunks)
+
+# Print the top results
+print(top_results)
+
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 def respond(message, history):
     messages = [{"role": "system", "content": "You are a friendly chatbot"}]
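
The diff cuts off just as respond begins, so this commit does not yet show how the retrieved chunks reach the chatbot. Below is a minimal sketch of how the pieces could plug together, assuming the intent is to ground the zephyr-7b-beta replies in the top-scoring chunks. The history loop, the chat_completion call, and the gr.ChatInterface wrapper follow common gradio / huggingface_hub usage and are not taken from this commit; max_tokens=300 is an arbitrary choice.

# Hypothetical continuation (not part of this commit)
def respond(message, history):
    # Retrieve the 3 chunks most similar to the incoming user message
    top_chunks = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
    context = "\n".join(top_chunks)

    # Put the retrieved context into the system prompt
    messages = [{"role": "system",
                 "content": "You are a friendly chatbot. Use this context to answer:\n" + context}]

    # Replay earlier turns (gradio's tuple-style history), then add the new message
    for user_msg, bot_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})

    # Ask the hosted model for a reply
    response = client.chat_completion(messages, max_tokens=300)
    return response.choices[0].message.content

# Launch the chat UI
gr.ChatInterface(respond).launch()

Because get_top_chunks normalizes both the query embedding and the chunk embeddings to unit length, the matrix product it computes is exactly cosine similarity, so the sketch needs no separate similarity function.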