# capstonefinal / app.py — WrapIT gift-idea chatbot (Hugging Face Space)
import gradio as gr
import random
import os
from huggingface_hub import InferenceClient
from sentence_transformers import SentenceTransformer
import torch
# Load the gift-idea knowledge base and embed it once at startup.
with open("knowledge.txt", "r", encoding="utf-8") as file:
    recent = file.read()

# Trim surrounding whitespace and split the text into one-line chunks.
cleaned_text = recent.strip()
chunks = cleaned_text.split("\n")

# Keep only non-empty lines, each stripped of leading/trailing whitespace.
cleaned_chunks = [line.strip() for line in chunks if line.strip()]

# Encode every chunk up front so each query needs only a single encode call.
# (Swap the model name here to change the embedding model.)
model = SentenceTransformer('all-MiniLM-L6-v2')
chunk_embeddings = model.encode(cleaned_chunks, convert_to_tensor=True)
def get_top_chunks(query, k=3):
    """Return the *k* knowledge-base chunks most similar to *query*.

    Both the query embedding and the precomputed chunk embeddings are
    L2-normalized, so the matmul below computes cosine similarity.

    Args:
        query: free-text user message to match against the knowledge base.
        k: maximum number of chunks to return (default 3 preserves the
           original fixed top-3 behavior).

    Returns:
        list[str]: best-matching chunk texts, most similar first.
    """
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Normalize so the dot product is cosine similarity, not raw magnitude.
    query_embedding_normalized = query_embedding / query_embedding.norm()
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
    # Clamp k so topk never requests more rows than the corpus has.
    top_indices = torch.topk(similarities, k=min(k, len(cleaned_chunks))).indices
    # BUG FIX: index into cleaned_chunks — the list that was actually
    # embedded — not the raw `chunks` list. The two misalign whenever
    # knowledge.txt contains blank lines, which returned the wrong text.
    return [cleaned_chunks[i] for i in top_indices]
# Shared Hugging Face Inference API client used by respond() below.
client = InferenceClient(
    model='Qwen/Qwen2.5-72B-Instruct',
    # Uncomment to authenticate (raises rate limits / enables gated models).
    #token=os.getenv('HF_TOKEN')
)
#client is where you can change the LLM model!
def respond(message, history):
    """Stream a gift-idea reply for *message*, given prior chat *history*.

    Retrieves the most relevant knowledge-base chunks, wraps them in a
    system prompt, and streams tokens from the hosted LLM, yielding the
    growing partial response so Gradio renders it incrementally.
    """
    # Guard: blank input. This is a generator, so the greeting must be
    # *yielded* — the original `return "Hello!"` ended the stream without
    # ever surfacing the text in the UI.
    if not message.strip():
        yield "Hello!"
        return

    gift_ideas = get_top_chunks(message)
    messages = [{'role': 'system', 'content': f'You give really good gift ideas and are super helpful! You also tell me the price of each item. You also offer to give a card stem to write thoughful cards to give with the gift. The card stems you give are related to the type of celebration the person is buying a gift for. You offer to give them a card template for a longer card if they want and if they say yes give them a card template. Give me 5 gift ideas if I ask. Use the following database for gift ideas: {gift_ideas}'}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    # BUG FIX: loop variable renamed from `message` (it shadowed the user
    # message parameter). max_tokens bounds the length of the reply;
    # temperature / top_p are left at API defaults — tune here if needed.
    for event in client.chat_completion(
        messages,
        max_tokens=500,
        stream=True,
    ):
        token = event.choices[0].delta.content
        # Streaming deltas can carry content=None (e.g. role-only chunks);
        # skip those instead of raising TypeError on `response += None`.
        if token:
            response += token
            yield response
# (Earlier Blocks/Textbox UI prototype removed — superseded by the
# ChatInterface layout below.)
# WrapIT page layout: banner image, intro text, then the chat widget.
_TITLE_MD = "## 🎁 Introducing WrapIT!"
_INTRO_MD = (
    "**WrapIT** helps users find personalized gift ideas and craft thoughtful "
    "card messages by inputting details like the recipient's interests, "
    "celebration type, and budget ✨ *All you have to do is wrap it.*"
)
_EXAMPLES = ["Best birthday gift?", "Romantic anniversary idea?", "Budget-friendly gifts?"]

with gr.Blocks(theme='hmb/amethyst') as demo:
    # Banner image across the top of the page.
    gr.Image(value="wrap_it_top_image.png", show_label=False, elem_id="top-image")
    # Title and tagline.
    gr.Markdown(_TITLE_MD)
    gr.Markdown(_INTRO_MD)
    # Chat widget wired to the streaming respond() generator.
    gr.ChatInterface(fn=respond, examples=_EXAMPLES)

demo.launch()