bhavya-k committed on
Commit
706313e
·
verified ·
1 Parent(s): 1663641
Files changed (1)
  1. app.py +131 -0
app.py ADDED
@@ -0,0 +1,131 @@
+ import gradio as gr
+ import random
+ import os
+ from huggingface_hub import InferenceClient
+ from sentence_transformers import SentenceTransformer
+ import torch
+
+ with open("knowledge.txt", "r", encoding="utf-8") as file:
+     recent = file.read()
+ # opens knowledge.txt as "file" and reads its full text into the recent variable
+
+
+ cleaned_text = recent.strip()
+ # cleaning up the text
+ chunks = cleaned_text.split("\n")
+ # separating the text into one-sentence pieces (one per line)
+ cleaned_chunks = []
+ # creating an empty list to put the cleaned chunks in
+
+ for chunk in chunks:
+     stripped_chunk = chunk.strip()
+     if stripped_chunk:
+         cleaned_chunks.append(stripped_chunk)
+ # loop through chunks and keep only the non-empty ones in cleaned_chunks
+
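+ # For intuition, a hypothetical example (the real lines depend on knowledge.txt):
+ # "Candle set - $15\n\nCozy mug - $10" splits into three pieces, the empty one
+ # is dropped, and cleaned_chunks == ["Candle set - $15", "Cozy mug - $10"]
+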
+ model = SentenceTransformer('all-MiniLM-L6-v2')
+
+ chunk_embeddings = model.encode(cleaned_chunks, convert_to_tensor=True)
+ # encode the cleaned chunks into vector embeddings (torch tensors, not plain arrays)
+
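+ # Side note (a property of this checkpoint, stated here as background, not from
+ # the original code): all-MiniLM-L6-v2 emits 384-dimensional vectors, so
+ # chunk_embeddings has shape (len(cleaned_chunks), 384)
+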
+ def get_top_chunks(query):
+     # my retrieval function, taking the user's query as its parameter
+     query_embedding = model.encode(query, convert_to_tensor=True)
+     # encode the query to a vector embedding for comparison
+     query_embedding_normalized = query_embedding / query_embedding.norm()
+     # normalize the query vector to unit length; allows comparison of meaning
+     chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
+     # normalizing the chunk vectors the same way
+
+     similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
+     # matmul (matrix multiplication) of unit vectors gives the cosine similarity
+     # between the query and every chunk
+     top_indices = torch.topk(similarities, k=3).indices
+     # get the indices of the chunks that are most similar to the query
+
+     top_chunks = []
+     for i in top_indices:
+         chunk = cleaned_chunks[i]
+         # index into cleaned_chunks (not chunks): the embeddings were built from
+         # cleaned_chunks, so those are the indices topk returns
+         top_chunks.append(chunk)
+     return top_chunks
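+
+ # Worked example for the math above: with unit vectors a = [1, 0] and
+ # b = [0.6, 0.8], a @ b = 0.6 = cos(angle between them); topk then just keeps
+ # the three largest scores. A hypothetical call (output depends on knowledge.txt):
+ # get_top_chunks("gift for a coffee lover")  # -> the 3 closest knowledge lines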
+
+ client = InferenceClient(
+     model='Qwen/Qwen2.5-72B-Instruct',
+     #token=os.getenv('HF_TOKEN')
+ )
+
+ # client is where you can change the LLM model!
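+ # (An aside, not from the original: any chat model served by the HF Inference
+ # API could be swapped in here, e.g. 'mistralai/Mistral-7B-Instruct-v0.3';
+ # uncommenting token=os.getenv('HF_TOKEN') authenticates the requests if the
+ # Space defines that secret.)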
+ def respond(message, history):
+     if not message.strip():
+         yield "Hello!"  # respond is a generator, so greet with yield, not return
+         return
+     gift_ideas = get_top_chunks(message)
+     messages = [{'role': 'system', 'content': f'You give really good gift ideas and are super helpful! You also tell me the price of each item. You also offer a card stem for writing thoughtful cards to give with the gift. The card stems you give are related to the type of celebration the person is buying a gift for. You offer a template for a longer card if they want one, and if they say yes you give them a card template. Give me 5 gift ideas if I ask. Use the following database for gift ideas: {gift_ideas}'}]
+
+     if history:
+         messages.extend(history)
+
+     messages.append({"role": "user", "content": message})
+
+     response = ""
+     for chunk in client.chat_completion(  # renamed from `message` so it no longer shadows the parameter
+         # max_tokens controls how long the response can be
+         messages,
+         max_tokens = 500,
+         stream = True,
+
+         #temperature = 0.8, # a decimal between 0 and 2
+         #top_p = .65 # a decimal between 0 and 1
+     ):
+         token = chunk.choices[0].delta.content
+         if token:  # the final stream chunk can carry None instead of text
+             response += token
+         yield response
+     #print(response["choices"][0]["message"]["content"].strip())
+
+     #yield response["choices"][0]["message"]["content"].strip()
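+
+ # Hedged sketch of how the UI consumes this generator (illustrative only):
+ # for partial in respond("birthday gift for mom", []):
+ #     ...  # each `partial` is the reply so far, which is why the chat streams live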
+
+
+ #with gr.Blocks(theme='hmb/amethyst') as demo:
+ #    with gr.Row(equal_height=True):
+ #        with gr.Column(scale=10):
+ #            """
+ #            # 🎁 Introducing WrapIT!
+ #            **WrapIT** helps users find personalized gift ideas and craft thoughtful card messages
+ #            by inputting details like the recipient's interests, celebration type, and budget.
+ #
+ #            ✨ *All you have to do is wrap it.*
+ #            """
+ #            )
+ #    gr.ChatInterface(respond, type='messages')
+
+
+ #chatbot = gr.Chatbot()
+ #msg = gr.Textbox(placeholder="Say hi to WrapIT here!", label="Message")
+ #send = gr.Button("Send")
+
+ #msg.submit(respond, [msg, chatbot], [msg, chatbot])
+ #send.click(respond, [msg, chatbot], [msg, chatbot])
+
+ with gr.Blocks(theme='hmb/amethyst') as demo:
+     # Top image
+     gr.Image(value="wrap_it_top_image.png", show_label=False, elem_id="top-image")
+
+     # Title and description
+     gr.Markdown("## 🎁 Introducing WrapIT!")
+     gr.Markdown("**WrapIT** helps users find personalized gift ideas and craft thoughtful card messages by inputting details like the recipient's interests, celebration type, and budget ✨ *All you have to do is wrap it.*")
+
+     # Chat interface (wired up with the same call as the commented draft above)
+     gr.ChatInterface(respond, type='messages')
+
+ demo.launch()