File size: 4,536 Bytes
7ee0f1f
 
 
 
 
 
 
69d0027
b75091e
7ee0f1f
69d0027
7ee0f1f
69d0027
7ee0f1f
69d0027
7ee0f1f
 
69d0027
7ee0f1f
69d0027
7ee0f1f
 
 
 
 
 
 
 
 
69d0027
7ee0f1f
 
 
 
 
 
 
 
 
 
 
 
 
 
69d0027
7ee0f1f
 
 
 
 
 
 
 
 
 
 
 
 
d96a4e9
01b3589
 
7ee0f1f
69d0027
de9b1f3
7ee0f1f
 
 
 
 
 
 
d96a4e9
7ee0f1f
69d0027
7ee0f1f
 
 
0a53fcb
69d0027
7ee0f1f
 
69d0027
7ee0f1f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69d0027
7ee0f1f
 
69d0027
 
 
 
8827645
 
 
 
 
 
 
69d0027
8827645
69d0027
 
 
 
7ee0f1f
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
import gradio as gr
from huggingface_hub import InferenceClient
from sentence_transformers import SentenceTransformer
import torch
import numpy as np
import requests

# LLM we are using: hosted zephyr-7b-beta via the HF Inference API.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# adding text files: concatenate the three local knowledge-base files into one
# corpus string (newline-separated so each line becomes a retrieval chunk below).
with open("be_a_better_you.txt", "r", encoding="utf-8") as file1, open("journal_prompts.txt", "r", encoding="utf-8") as file2, open("workout.txt", "r", encoding="utf-8") as file3:
    wellness_text = file1.read() + "\n" + file2.read() + "\n" + file3.read()

# cleaning up the text: split on newlines and drop blank/whitespace-only lines,
# so every element of cleaned_chunks is one non-empty line of the corpus.
cleaned_text = wellness_text.strip()
chunks = cleaned_text.split("\n")
cleaned_chunks = [chunk.strip() for chunk in chunks if chunk.strip()]

# import model for embeddings: encode the whole corpus once at startup;
# get_top_chunks() reuses chunk_embeddings for every query.
model = SentenceTransformer('all-MiniLM-L6-v2')
chunk_embeddings = model.encode(cleaned_chunks, convert_to_tensor=True)

def get_top_chunks(query, k=3):
    """Return the corpus chunks most similar to *query*.

    Similarity is cosine similarity between the query embedding and the
    precomputed module-level ``chunk_embeddings``.

    Args:
        query: Free-text user query to embed.
        k: Maximum number of chunks to return (default 3, the value that
           was previously hard-coded).

    Returns:
        List of up to ``k`` chunk strings, most similar first.
    """
    query_embedding = model.encode(query, convert_to_tensor=True)
    # Normalize both sides so the dot product below equals cosine similarity.
    query_embedding_normalized = query_embedding / query_embedding.norm()
    chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
    # Clamp k so torch.topk never raises when the corpus has fewer than k chunks.
    top_k = min(k, similarities.shape[0])
    top_indices = torch.topk(similarities, k=top_k).indices
    top_chunks = [cleaned_chunks[i] for i in top_indices]
    return top_chunks

def get_nutrition_info(food_query):
    """Look up nutrition facts for a natural-language food description.

    POSTs *food_query* to the Nutritionix "natural/nutrients" endpoint.

    Args:
        food_query: e.g. "1 banana and a cup of oatmeal".

    Returns:
        Parsed JSON dict on HTTP 200, otherwise None — including on
        timeouts and network errors, so callers see a single failure mode.
    """
    url = "https://trackapi.nutritionix.com/v2/natural/nutrients"
    # SECURITY: API credentials are hard-coded in source. Move them to
    # environment variables / deployment secrets before publishing.
    headers = {
        "x-app-id": "5e0843",
        "x-app-key": "00948483e0e3540ea8dc5297ad8216d9",
        "Content-Type": "application/json"
    }
    body = {
        "query": food_query
    }
    try:
        # Timeout keeps a slow/unreachable API from hanging the chat handler.
        response = requests.post(url, headers=headers, json=body, timeout=10)
    except requests.RequestException:
        return None
    if response.status_code == 200:
        return response.json()
    return None

def get_motivational_quote():
    """Fetch a random motivational quote from ZenQuotes.

    Returns:
        A string "<quote> -<author>" on success, or a built-in fallback
        message on any HTTP error, timeout, or network failure.

    Bug fixed: ZenQuotes returns a LIST of quote objects, so the quote
    text must be read as ``[0]['q']`` (the original used ``['q']`` on the
    list itself, which raised TypeError; the author lookup already
    correctly used ``[0]['a']``). Also parses the JSON once, not twice.
    """
    url = "https://zenquotes.io/api/random"
    try:
        response = requests.get(url, timeout=10)
    except requests.RequestException:
        return "Keep going! You're doing great."
    if response.status_code == 200:
        data = response.json()
        return data[0]['q'] + " -" + data[0]['a']
    return "Keep going! You're doing great."

def respond(message, history):
    """Stream a chat reply for the Gradio ChatInterface.

    Builds a system prompt from retrieved corpus chunks, Nutritionix data,
    and a motivational quote, then streams tokens from zephyr-7b-beta.

    Args:
        message: Latest user message.
        history: Prior turns as OpenAI-style message dicts (may be empty).

    Yields:
        The growing response string after each streamed token.
    """
    # Retrieve supporting context once — the original called
    # get_top_chunks(message) twice with identical arguments.
    user_knowledge = get_top_chunks(message)
    user_nutrition = get_nutrition_info(message)
    # Bug fixed: the original assigned the function object itself
    # (get_motivational_quote without parentheses), so the system prompt
    # contained "<function get_motivational_quote ...>" instead of a quote.
    user_motivation = get_motivational_quote()
    messages = [{
        "role": "system",
        "content": f"Get information from {user_knowledge},{user_nutrition}, and {user_motivation}. You are a big sister chatbot named Nessie. You help people feel better in a simple manner. You need to be kind! please! Always reply in 3 sentences. Or if a list, 5 bullet points. Do NOT stop mid-sentence, even if more tokens are needed. You may only add tokens to complete the third sentence. When the user is asking for ideas give 5 max, as you don't want to overwhelm them"
    }]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})

    user_context = f"{message}\nInformation: {user_knowledge}"

    messages.append({"role": "user", "content": user_context})

    response = ""
    # Loop variable renamed: the original reused `messages`, clobbering the
    # request payload while it was still being streamed.
    for chunk in client.chat_completion(
        messages,
        max_tokens=10000,
        stream=True,
    ):
        token = chunk.choices[0].delta.content
        # The final stream chunk can carry None content; guard the +=.
        if token:
            response += token
        yield response

# Soft pastel theme for the wellness UI (rose primary, zinc accents,
# pink neutrals).
theme = gr.themes.Soft(primary_hue="rose", secondary_hue="zinc", neutral_hue="pink")

# Assemble the UI: the chat interface plus an embedded Spotify playlist.
with gr.Blocks(theme=theme) as demo:
    chatbot = gr.ChatInterface(
        fn=respond,
        type='messages',
        title="Hi! I'm Nessie, your personal wellness assistant. What can I assist you with today?",
        examples=[
            "Can you help me with my dietary goals? I want to track my calories, macros, and get advice based on myself.",
            "Can you help me reach my fitness goals? I would like guidance and recommendations on workouts based on my goals.",
            "Can you give me some journal prompts? I want to start journaling to help myself reflect on my goals and have some daily affirmations."
        ]
    )

    with gr.Row():
        gr.HTML(
            """
            <iframe style="border-radius:12px"
                src="https://open.spotify.com/embed/playlist/5dBkWtszs4KBDjwmpedb66"
                width="100%"
                height="152"
                frameborder="0"
                allow="autoplay; clipboard-write; encrypted-media; fullscreen; picture-in-picture"
                loading="lazy">
            </iframe>

            """
        )

# Launch the app. Bug fixed: the original called demo.launch() INSIDE the
# `with gr.Blocks()` context, i.e. before the layout context was closed;
# Gradio expects launch() after the Blocks block is finalized.
demo.launch(debug=True)