import os

import gradio as gr
import numpy as np
import requests
import torch
from huggingface_hub import InferenceClient
from sentence_transformers import SentenceTransformer

# LLM backend used for streaming chat completions.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Load the three wellness knowledge files and merge them into one corpus.
with open("be_a_better_you.txt", "r", encoding="utf-8") as file1, open("journal_prompts.txt", "r", encoding="utf-8") as file2, open("workout.txt", "r", encoding="utf-8") as file3:
    wellness_text = file1.read() + "\n" + file2.read() + "\n" + file3.read()

# Split the corpus on newlines and keep only non-empty, trimmed lines
# as the retrieval chunks.
cleaned_text = wellness_text.strip()
cleaned_chunks = [
    line.strip() for line in cleaned_text.split("\n") if line.strip()
]

# Sentence-embedding model used to vectorize both chunks and queries.
model = SentenceTransformer('all-MiniLM-L6-v2')

# Pre-compute one embedding per chunk so each query only embeds itself.
chunk_embeddings = model.encode(cleaned_chunks, convert_to_tensor=True)

def get_top_chunks(query, k=3):
    """Return the k corpus chunks most semantically similar to *query*.

    Parameters
    ----------
    query : str
        Free-text user query to match against the wellness corpus.
    k : int, optional
        Maximum number of chunks to return (default 3). Clamped to the
        number of available chunks so ``torch.topk`` never raises.

    Returns
    -------
    list[str]
        Best-matching chunk texts, most similar first.
    """
    # Embed the query into the same vector space as the corpus chunks.
    query_embedding = model.encode(query, convert_to_tensor=True)

    # Normalize both sides so the dot product equals cosine similarity.
    query_embedding_normalized = query_embedding / query_embedding.norm()
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)

    # One matrix-vector product scores every chunk against the query at once.
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)

    # Guard: topk raises when k exceeds the number of chunks.
    k = min(k, len(cleaned_chunks))
    top_indices = torch.topk(similarities, k=k).indices

    # Map tensor indices back to the original chunk text.
    return [cleaned_chunks[int(i)] for i in top_indices]

def get_nutrition_info(food_query):
    """Look up nutrition facts for a natural-language food description.

    Sends *food_query* (e.g. "2 eggs and toast") to the Nutritionix
    natural-language nutrients endpoint.

    Parameters
    ----------
    food_query : str
        Natural-language description of the food to analyze.

    Returns
    -------
    dict | None
        Parsed JSON payload on HTTP 200, otherwise None (including on
        network errors / timeouts).
    """
    url = "https://trackapi.nutritionix.com/v2/natural/nutrients"
    # SECURITY: credentials were hard-coded in source. Prefer environment
    # variables; the original values remain as fallbacks so existing
    # deployments keep working.
    headers = {
        "x-app-id": os.environ.get("NUTRITIONIX_APP_ID", "5e0843"),
        "x-app-key": os.environ.get("NUTRITIONIX_APP_KEY", "00948483e0e3540ea8dc5297ad8216d9"),
        "Content-Type": "application/json"
    }
    body = {
        "query": food_query
    }

    try:
        # Timeout keeps a dead API from hanging the chat handler forever.
        response = requests.post(url, headers=headers, json=body, timeout=10)
    except requests.RequestException:
        return None
    if response.status_code == 200:
        return response.json()
    return None


def get_motivational_quote():
    """Fetch a random motivational quote from ZenQuotes.

    Returns
    -------
    str
        ``"<quote> -<author>"`` on success, or a built-in encouragement
        line when the request fails for any reason.
    """
    url = "https://zenquotes.io/api/random"
    try:
        response = requests.get(url, timeout=10)
    except requests.RequestException:
        return "Keep going! You're doing great."
    if response.status_code == 200:
        # ZenQuotes returns a one-element list: [{"q": quote, "a": author}].
        # The original indexed the list directly with 'q', which raised
        # TypeError instead of producing the quote; parse the JSON once
        # and take element 0 for both fields.
        data = response.json()[0]
        return data['q'] + " -" + data['a']
    return "Keep going! You're doing great."



def respond(message, history):
    """Stream a chat reply for *message*, grounded in retrieved context.

    Parameters
    ----------
    message : str
        Latest user message.
    history : list[dict] | None
        Prior turns in ``{"role", "content"}`` format, as supplied by
        ``gr.ChatInterface(type='messages')``.

    Yields
    ------
    str
        The reply accumulated so far (Gradio streaming convention).
    """
    messages = [{
        "role": "system", 
        "content": "You are a big sister chatbot named Nessie. You help people feel better in a simple manner. Always reply in 3 sentences or less. Do NOT stop mid-sentence, as you may confuse them even more. When the user is asking for ideas give 5 max, as you don't want to overwhelm them"
    }]
    # Retrieve the corpus chunks most relevant to this message (RAG step).
    context = get_top_chunks(message)

    if history:
        messages.extend(history)

    # Send the user turn once, with the retrieved context appended.
    # (The original appended both the bare message AND this augmented
    # copy, duplicating the user turn in every request.)
    user_context = f"{message}\nInformation: {context}"
    messages.append({"role": "user", "content": user_context})

    response = ""
    # Fresh loop variable: the original reused the name `messages`,
    # shadowing the request payload mid-stream.
    for event in client.chat_completion(
        messages,
        max_tokens=200,
        stream=True,
    ):
        token = event.choices[0].delta.content
        # The final stream event can carry delta.content == None;
        # guarding avoids a TypeError on concatenation.
        if token:
            response += token
        yield response

# Soft pastel theme for the wellness chat UI.
theme = gr.themes.Soft(primary_hue="rose", secondary_hue="zinc", neutral_hue="pink")

# Build the chat UI around the streaming `respond` handler and launch it.
with gr.Blocks(theme=theme) as demo:
    example_prompts = [
        "Can you help me with my dietary goals? I want to track my calories, macros, and get advice based on myself.",
        "Can you help me reach my fitness goals? I would like guidance and recommendations on workouts based on my goals.",
        "Can you give me some journal prompts? I want to start journaling to help myself reflect on my goals and have some daily affirmations. "
    ]
    chatbot = gr.ChatInterface(
        fn=respond,
        type='messages',
        title="Hi! I'm Nessie, your personal wellness assistant. What can I assist you with today?",
        examples=example_prompts,
    )
    demo.launch(debug=True)