import gradio as gr
from huggingface_hub import InferenceClient
from sentence_transformers import SentenceTransformer
import torch

with open("knowledge.txt", "r", encoding="utf-8") as file:
  recent = file.read()
  # open the knowledge base and read its full text into the "recent" variable



cleaned_text = recent.strip()
# strip leading/trailing whitespace from the raw text
chunks = cleaned_text.split("\n")
# separate the text into one-line pieces
cleaned_chunks = []
# create an empty list to hold the cleaned chunks

for chunk in chunks:
  stripped_chunk = chunk.strip()
  if stripped_chunk:
    cleaned_chunks.append(stripped_chunk)
# loop through the chunks and keep only the non-empty ones


model = SentenceTransformer('all-MiniLM-L6-v2')

chunk_embeddings = model.encode(cleaned_chunks, convert_to_tensor=True)
# encode the cleaned chunks into a single tensor of vector embeddings
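# each row is one chunk's embedding; for all-MiniLM-L6-v2 the shape should be
# (len(cleaned_chunks), 384), since this model outputs 384-dimensional vectors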


def get_top_chunks(query):
  # take a query string and return the chunks most relevant to it
  query_embedding = model.encode(query, convert_to_tensor=True)
  # encode the query to a vector embedding for comparison
  query_embedding_normalized = query_embedding / query_embedding.norm()
  # normalize the query vector to unit length so dot products compare meaning
  chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
  # normalize each chunk vector the same way

  similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
  # matrix multiplication gives the cosine similarity of the query to every chunk

  top_indices = torch.topk(similarities, k=min(3, len(cleaned_chunks))).indices
  # indices of the (up to) 3 chunks most similar to the query

  top_chunks = []
  for i in top_indices:
    top_chunks.append(cleaned_chunks[i])
    # index into cleaned_chunks, the list the embeddings were built from
    # (the raw "chunks" list still contains empty lines, so its indices differ)
  return top_chunks
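
# Quick retrieval sanity check (a sketch — the query below is just an example;
# real results depend on the contents of knowledge.txt):
# print(get_top_chunks("birthday gift for a coffee lover"))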
    
client = InferenceClient('google/gemma-3-27b-it')
# client is where you can change the LLM model!
# if the model requires authentication, pass token="hf_..." (e.g. read from
# the HF_TOKEN environment variable) to InferenceClient
def respond(message, history):
    # retrieve the most relevant chunks and hand them to the model as
    # context (retrieval-augmented generation)
    gift_ideas = get_top_chunks(message)
    messages = [{'role': 'system', 'content': f'You give really good gift ideas and are super helpful! You also tell me the price of each item. Give me 5 gift ideas if I ask. Use the following database for gift ideas: {gift_ideas}'}]

    if history:
        messages.extend(history)

    messages.append({"role": "user", "content": message})

    response = client.chat_completion(
        messages,
        max_tokens=500,  # max_tokens controls how long the response can be
        # temperature=0.8,  # optional: a decimal between 0-2; higher = more varied
        # top_p=0.65,       # optional: a decimal between 0-1; nucleus sampling cutoff
    )
    return response.choices[0].message.content.strip()
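
# Streaming variant (a sketch, not wired into the UI below): with stream=True,
# chat_completion yields chunks whose .delta carries the newly generated
# tokens, and a generator lets gr.ChatInterface show the reply as it arrives.
# def respond_stream(message, history):
#     gift_ideas = get_top_chunks(message)
#     messages = [{'role': 'system', 'content': f'Use this gift database: {gift_ideas}'}]
#     if history:
#         messages.extend(history)
#     messages.append({"role": "user", "content": message})
#     response = ""
#     for chunk in client.chat_completion(messages, max_tokens=500, stream=True):
#         token = chunk.choices[0].delta.content
#         if token:
#             response += token
#             yield response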
    


with gr.Blocks(theme='hmb/amethyst') as demo:
    # Top image
    gr.Image(value="wrap_it_top_image.png", show_label=False, elem_id="top-image")

    # Title and description
    gr.Markdown("## 🎁 Introducing WrapIT!")
    gr.Markdown("**WrapIT** helps users find personalized gift ideas and craft thoughtful card messages by inputting details like the recipient's interests, celebration type, and budget ✨ *All you have to do is wrap it.*")

    # Chat interface; type="messages" makes history a list of role/content
    # dicts, which is the format respond() extends into the prompt
    gr.ChatInterface(
        fn=respond,
        type="messages",
        examples=["Best birthday gift?", "Romantic anniversary idea?", "Budget-friendly gifts?"]
    )


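    # Embedded Spotify player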
    with gr.Row():
        gr.HTML(
            """
            <iframe style="border-radius:12px"
                    src="https://open.spotify.com/embed/track/4356Typ82hUiFAynbLYbPn"
                    width="100%"
                    height="152"
                    frameBorder="0"
                    allowfullscreen=""
                    allow="autoplay; clipboard-write; encrypted-media; fullscreen; picture-in-picture"
                    loading="lazy">
            </iframe>
            """
        )

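# debug=True surfaces errors in the console; share=True also creates a public
# gradio.live link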
demo.launch(debug=True, share=True)