lisaude0512 committed on
Commit
7ee0f1f
Β·
verified Β·
1 Parent(s): 57d79d0

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +124 -0
app.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from huggingface_hub import InferenceClient
3
+ from sentence_transformers import SentenceTransformer
4
+ import torch
5
+ import numpy as np
6
+ import requests
7
+
8
+ #LLM we are using
9
+ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
10
+
11
+ #adding text file
12
+ with open("be_a_better_you.txt", "r", encoding="utf-8") as file1, open("journal_prompts.txt", "r", encoding="utf-8") as file2, open("workout.txt", "r", encoding="utf-8") as file3:
13
+ wellness_text = file1.read() + "\n" + file2.read() +"\n" + file3.read()
14
+
15
+ #cleaning up the text
16
+ cleaned_text = wellness_text.strip()
17
+ chunks = cleaned_text.split("\n")
18
+ cleaned_chunks = []
19
+
20
+ #putting text in chunks
21
+ for chunk in chunks:
22
+ stripped_chunk = chunk.strip()
23
+ if stripped_chunk:
24
+ cleaned_chunks.append(stripped_chunk)
25
+
26
+ #import model for embeddings
27
+ model = SentenceTransformer('all-MiniLM-L6-v2')
28
+
29
+ chunk_embeddings = model.encode(cleaned_chunks, convert_to_tensor=True)
30
+
31
+ def get_top_chunks(query):
32
+ # creating a function taking query as my parameter
33
+ query_embedding = model.encode(query, convert_to_tensor=True)
34
+ # encode query to vector embedding for comparison
35
+ query_embedding_normalized = query_embedding / query_embedding.norm()
36
+ # normalize query to 1: allows for comparison of meaning
37
+ chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
38
+ # normalizing chunks for comparison of meaning
39
+ similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
40
+ # using matmul (matrix multiplication) method to compare query to chunks
41
+ top_indices = torch.topk(similarities, k=3).indices
42
+ # get the indices of the chunks thart are most similar to my query
43
+
44
+ top_chunks = []
45
+
46
+ for i in top_indices:
47
+ chunk = cleaned_chunks[i]
48
+ # for each index number in top_indices, get back the text
49
+ top_chunks.append(chunk)
50
+
51
+ return top_chunks
52
+
53
+ def get_nutrition_info(food_query):
54
+ url = "https://trackapi.nutritionix.com/v2/natural/nutrients"
55
+ headers = {
56
+ "x-app-id": "5e0843",
57
+ "x-app-key": "00948483e0e3540ea8dc5297ad8216d9",
58
+ "Content-Type": "application/json"
59
+ }
60
+ body = {
61
+ "query": food_query
62
+ }
63
+ response = requests.post(url, headers=headers, json=body)
64
+ if response.status_code == 200:
65
+ data = response.json()
66
+ return data
67
+ else:
68
+ return None
69
+
70
+
71
+ def get_motivational_quote():
72
+ url = "https://zenquotes.io/api/random"
73
+ response = requests.get(url)
74
+ if response.status_code == 200:
75
+ quote = response.json()['q'] + " -" + response.json()[0]['a']
76
+ return quote
77
+ else:
78
+ return "Keep going! You're doing great."
79
+
80
+
81
+
82
+ def respond(message, history):
83
+ messages = [{
84
+ "role": "system",
85
+ "content": "You are a big sister chatbot named Nessie. You help people feel better in a simple manner. Always reply in 3 sentences or less. Do NOT stop mid-sentence, as you may confuse them even more. When the user is asking for ideas give 5 max, as you don't want to overwhelm them"
86
+ }]
87
+ # change the personality
88
+ context = get_top_chunks(message)
89
+
90
+ if history:
91
+ messages.extend(history)
92
+ messages.append({"role": "user", "content": message})
93
+
94
+ user_context = f"{message}\nInformation: {context}"
95
+ messages.append({"role": "user", "content": user_context})
96
+ response = ""
97
+ for messages in client.chat_completion(
98
+ messages,
99
+ max_tokens = 200,
100
+ stream = True,
101
+ ):
102
+ token = messages.choices[0].delta.content
103
+ response+= token
104
+ yield response
105
+
106
+ theme = gr.themes.Soft(
107
+ primary_hue="rose",
108
+ secondary_hue="zinc",
109
+ neutral_hue="pink",
110
+ )
111
+
112
+ with gr.Blocks(theme=theme) as demo:
113
+ chatbot = gr.ChatInterface(
114
+ fn=respond,
115
+ type='messages',
116
+ title="Hi! I'm Nessie, your personal wellness assistant. What can I assist you with today?",
117
+ examples=[
118
+ "Can you help me with my dietary goals? I want to track my calories, macros, and get advice based on myself.",
119
+ "Can you help me reach my fitness goals? I would like guidance and recommendations on workouts based on my goals.",
120
+ "Can you give me some journal prompts? I want to start journaling to help myself reflect on my goals and have some daily affirmations. "
121
+ ]
122
+ )
123
+ #chatbot = gr.ChatInterface(respond, type = 'messages', title= "Hi! I'm Nessie, your personal wellness assistant. What can I assist you with today?",examples=["Can I help you with your dietary goals? I can help you track your calories, macros, and give advice based on personal goals, height, and weight.","Can I help you with your physical health and help you reach your fitness goals? I can give guidance and recommendations for specific workouts based on personal goals.","If you are struggling, I am here. You are so beautiful and so loved! I'm here for whatever you need. "])
124
+ demo.launch(debug=True)