kaitwithkwk committed on
Commit
22cf4d4
·
verified ·
1 Parent(s): fccc9d2

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +84 -0
app.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import random
3
+ from huggingface_hub import InferenceClient
4
+ from sentence_transformers import SentenceTransformer
5
+ import torch
6
+
7
+ with open("knowledge.txt", "r", encoding="utf-8") as file:
8
+ recent = file.read()
9
+
10
+ cleaned_text = recent.strip()
11
+ chunks = cleaned_text.split("\n")
12
+ cleaned_chunks = []
13
+
14
+ for chunk in chunks:
15
+ stripped_chunk = chunk.strip()
16
+ if stripped_chunk:
17
+ cleaned_chunks.append(stripped_chunk)
18
+
19
+ model = SentenceTransformer('all-MiniLM-L6-v2')
20
+
21
+ chunk_embeddings = model.encode(cleaned_chunks, convert_to_tensor=True)
22
+
23
+ def get_top_chunks(query):
24
+ query_embedding = model.encode(query, convert_to_tensor=True)
25
+ query_embedding_normalized = query_embedding / query_embedding.norm()
26
+ chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
27
+ similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
28
+
29
+ top_indices = torch.topk(similarities, k=3).indices
30
+
31
+ top_chunks = []
32
+
33
+ for i in top_indices:
34
+ chunk = chunks[i]
35
+ top_chunks.append(chunk)
36
+
37
+ return top_chunks
38
+
39
+ client = InferenceClient('HuggingFaceH4/zephyr-7b-beta')
40
+
41
+ def respond(message,history):
42
+ gift_ideas = get_top_chunks(message)
43
+ messages = [{'role': 'system', 'content': f'You give really good gift ideas and are super helpful! You also tell me the price of each item. Give me 5 gift ideas if I ask. Use the following database for gift ideas: {gift_ideas}'}]
44
+
45
+ if history:
46
+ messages.extend(history)
47
+
48
+ messages.append({"role": "user", "content": message})
49
+
50
+ response = client.chat_completion(
51
+ messages,
52
+ max_tokens = 500,
53
+ )
54
+
55
+ return response['choices'][0]['message']['content'].strip()
56
+
57
+
58
+ with gr.Blocks(theme='hmb/amethyst') as demo:
59
+ gr.Image(value="wrap_it_top_image.png", show_label=False, elem_id="top-image")
60
+
61
+ gr.Markdown("## 🎁 Introducing WrapIT!")
62
+ gr.Markdown("**WrapIT** helps users find personalized gift ideas and craft thoughtful card messages by inputting details like the recipient's interests, celebration type, and budget ✨ *All you have to do is wrap it.*")
63
+
64
+ gr.ChatInterface(
65
+ fn=respond,
66
+ examples=["Best birthday gift?", "Romantic anniversary idea?", "Budget-friendly gifts?"]
67
+ )
68
+
69
+ with gr.Row():
70
+ gr.HTML(
71
+ """
72
+ <iframe style="border-radius:12px"
73
+ src="https://open.spotify.com/embed/track/4356Typ82hUiFAynbLYbPn"
74
+ width="100%"
75
+ height="152"
76
+ frameBorder="0"
77
+ allowfullscreen=""
78
+ allow="autoplay; clipboard-write; encrypted-media; fullscreen; picture-in-picture"
79
+ loading="lazy">
80
+ </iframe>
81
+ """
82
+ )
83
+
84
+ demo.launch(debug=True, share=True)