Spaces:
Sleeping
Sleeping
Vela
committed on
Commit
·
fa19bce
1
Parent(s):
bb483a7
created project
Browse files- .gitignore +1 -0
- README.md +2 -2
- embedding_model.pkl +3 -0
- requirements.txt +5 -0
- src/__pycache__/main.cpython-312.pyc +0 -0
- src/app.py +21 -0
- src/streamlit_app/__pycache__/embedding_model.cpython-312.pyc +0 -0
- src/streamlit_app/__pycache__/home_page.cpython-312.pyc +0 -0
- src/streamlit_app/__pycache__/model.cpython-312.pyc +0 -0
- src/streamlit_app/embedding_model.py +45 -0
- src/streamlit_app/home_page.py +67 -0
- src/streamlit_app/model.py +25 -0
- src/utils/__pycache__/logger.cpython-312.pyc +0 -0
- src/utils/logger.py +22 -0
.gitignore
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
.venv
|
README.md
CHANGED
|
@@ -1,11 +1,11 @@
|
|
| 1 |
---
|
| 2 |
title: Physiotherapy Assistant
|
| 3 |
-
emoji:
|
| 4 |
colorFrom: pink
|
| 5 |
colorTo: yellow
|
| 6 |
sdk: streamlit
|
| 7 |
sdk_version: 1.42.1
|
| 8 |
-
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
---
|
| 11 |
|
|
|
|
| 1 |
---
|
| 2 |
title: Physiotherapy Assistant
|
| 3 |
+
emoji: 🏋️‍♂️🧘‍♀️
|
| 4 |
colorFrom: pink
|
| 5 |
colorTo: yellow
|
| 6 |
sdk: streamlit
|
| 7 |
sdk_version: 1.42.1
|
| 8 |
+
app_file: src/app.py
|
| 9 |
pinned: false
|
| 10 |
---
|
| 11 |
|
embedding_model.pkl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:714cd4a98832cd66bfbd5c191ad1bd834d562adcc4c71b213c06fb746fac0022
|
| 3 |
+
size 556171912
|
requirements.txt
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
streamlit
|
| 2 |
+
pandas
|
| 3 |
+
groq
|
| 4 |
+
scikit-learn
|
| 5 |
+
sentence_transformers
|
src/__pycache__/main.cpython-312.pyc
ADDED
|
Binary file (434 Bytes). View file
|
|
|
src/app.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
from streamlit_app import model,home_page,embedding_model
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def main():
    """Entry point: configure the page, then gate the chat UI on a known user name."""
    home_page.page_config()
    home_page.page_title()
    # get_or_greet_user_name() is falsy until the visitor has submitted a name.
    if not home_page.get_or_greet_user_name():
        return  # still collecting the name; nothing else to render this run
    home_page.display_chat()
    home_page.handle_user_input()

if __name__ == "__main__":
    main()
|
src/streamlit_app/__pycache__/embedding_model.cpython-312.pyc
ADDED
|
Binary file (2.66 kB). View file
|
|
|
src/streamlit_app/__pycache__/home_page.cpython-312.pyc
ADDED
|
Binary file (5.72 kB). View file
|
|
|
src/streamlit_app/__pycache__/model.cpython-312.pyc
ADDED
|
Binary file (1.62 kB). View file
|
|
|
src/streamlit_app/embedding_model.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from sentence_transformers import SentenceTransformer, util
|
| 2 |
+
from sklearn.linear_model import LogisticRegression
|
| 3 |
+
import pickle
|
| 4 |
+
from sklearn.model_selection import train_test_split
|
| 5 |
+
from sklearn.metrics.pairwise import cosine_similarity
|
| 6 |
+
import torch
|
| 7 |
+
import pandas as pd
|
| 8 |
+
import os
|
| 9 |
+
|
| 10 |
+
# Seed descriptions of the physiotherapy areas this assistant covers.
# NOTE(review): not referenced by any function in this module — presumably
# intended for future retrieval/classification; confirm before removing.
topics = [
    "Ankle Sprain : Recovery exercises for ankle sprains and rehabilitation tips.",
    "Back Pain : Best practices for relieving back pain and strengthening exercises.",
    "Shoulder Injury : Shoulder injury rehabilitation, strengthening, and recovery techniques."
]
|
| 15 |
+
|
| 16 |
+
def get_encoding_model():
    """Download the GTE sentence-embedding model, pickle it to disk, and return it.

    Returns:
        The loaded SentenceTransformer instance.
    """
    model_encode = SentenceTransformer('Alibaba-NLP/gte-base-en-v1.5', trust_remote_code=True)
    save_model(model_encode, "embedding_model.pkl")
    # Bug fix: callers (get_embedding) assign this function's result, but it
    # previously fell off the end and returned None implicitly.
    return model_encode
|
| 19 |
+
|
| 20 |
+
def get_embedding(text):
    """Encode *text* (a string or list of strings) with the cached embedding model.

    Downloads and pickles the model on first use, then always loads it from
    "embedding_model.pkl" in the working directory.

    Returns:
        The embedding(s) produced by SentenceTransformer.encode.
    """
    if not os.path.exists("embedding_model.pkl"):
        # Bug fix: the result was previously bound to an unused local
        # (`loaded_model`); only the side effect of caching matters here.
        get_encoding_model()

    model_encode = load_model("embedding_model.pkl")
    embedding = model_encode.encode(text)
    return embedding
|
| 27 |
+
|
| 28 |
+
def save_model(model, filename):
    """Pickle *model* to *filename* (binary mode), overwriting any existing file."""
    with open(filename, 'wb') as model_file:
        pickle.dump(model, model_file)
    # Bug fix: the f-string previously contained no placeholder, so the
    # message never reported which file was written.
    print(f"Model saved to {filename}")
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def load_model(filename):
    """Unpickle and return the object stored at *filename*.

    SECURITY NOTE: pickle.load executes arbitrary code from the file — only
    load files this application itself has written.
    """
    with open(filename, 'rb') as model_file:
        loaded_model = pickle.load(model_file)
    # Bug fix: the f-string previously contained no placeholder, so the
    # message never reported which file was read.
    print(f"Model loaded from {filename}")
    return loaded_model
|
| 40 |
+
|
| 41 |
+
def get_cosine_similarity(prompt):
    """Return the cosine similarity (rounded to 4 dp) between *prompt* and the
    anchor word "Physiotherapy".

    Used by the UI as a cheap on-topic gate before spending an LLM call.
    """
    # Bug fix: the anchor was misspelled "Phsyiotherapy", which skews the
    # similarity score the relevance gate depends on.
    anchor = "Physiotherapy"
    embeddings = get_embedding([anchor, prompt])
    similarity = util.cos_sim(embeddings[0], embeddings[1]).item()
    return round(similarity, 4)
|
src/streamlit_app/home_page.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
from streamlit_app import model,embedding_model
|
| 3 |
+
from utils import logger
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def page_config():
    """Apply the global Streamlit page settings (wide layout, browser-tab title)."""
    app_title = "Physio Assist"
    st.set_page_config(page_title=app_title, layout="wide")
    logger.log("info", "Configuring the page layout and title")
|
| 9 |
+
|
| 10 |
+
def page_title():
    """Render the app's main heading."""
    # Fix: the emoji were mojibake (UTF-8 bytes decoded as Latin-1) in the
    # original source; restored to the intended characters.
    st.title("🏋️‍♂️🧘‍♀️ Welcome to Physio Assist: Your Physical Therapy Companion")
    logger.log("info", "Setting the title to 'Physio Assist: Your Physical Therapy Companion'")
|
| 13 |
+
|
| 14 |
+
def get_or_greet_user_name():
    """Collect the visitor's name on first use, or greet them if already known.

    Returns the element produced by st._bottom.subheader when a name is
    stored (truthy, so callers can gate the chat UI on it); implicitly
    returns None while the name is still being collected.
    """
    # First run in this session: create the slot so later reads are safe.
    if 'user_name' not in st.session_state:
        st.session_state.user_name = None
        logger.log("info","user_name not found in session_state, setting to None.")

    if st.session_state.user_name is None:
        logger.log("info","user_name is None, requesting user input.")
        user_name = st.text_input("Please let me know your name:",
                                  placeholder="Enter your name buddy")
        if user_name:
            st.session_state.user_name = user_name
            logger.log("info", f"User entered name: {user_name}. Setting session_state.user_name.")
            # Rerun so the greeting branch below renders on the next pass.
            st.rerun()
    else:
        logger.log("info", f"User already entered a name: {st.session_state.user_name}. Displaying greeting.")
        # NOTE(review): st._bottom is a private Streamlit API — confirm it still
        # exists in the pinned streamlit version (sdk_version 1.42.1).
        return st._bottom.subheader(f"Hello {st.session_state.user_name}! How can I assist you today?")
|
| 30 |
+
|
| 31 |
+
def display_chat():
    """Replay the stored chat transcript so the conversation survives reruns."""
    logger.log("info","Displaying the chat history.")
    if "messages" not in st.session_state:
        st.session_state.messages = []
    for entry in st.session_state.messages:
        with st.chat_message(entry["role"]):
            st.markdown(entry["content"])
    logger.log("info", f"Displayed {len(st.session_state.messages)} messages from the chat history.")
|
| 39 |
+
|
| 40 |
+
def handle_user_input():
    """Read a chat prompt, filter it for physiotherapy relevance, and answer it.

    Flow: echo the user's message, append it to the session history, then use
    the embedding cosine-similarity gate (threshold 0.3) to decide whether the
    prompt is on-topic before spending an LLM call.
    """
    logger.log("info", "Waiting for user input...")
    prompt = st.chat_input("Ask me anything related to physiotherapy. E.g., 'How can I recover from a sprained ankle?'")

    # chat_input returns None until the user actually submits something.
    if not prompt:
        return

    with st.chat_message("user"):
        st.markdown(prompt)
    # Bug fix: this append was wrapped in a redundant nested `if prompt:`
    # that could never be False here.
    st.session_state.messages.append({"role": "user", "content": prompt})

    similarity = embedding_model.get_cosine_similarity(prompt)
    if similarity >= 0.3:
        with st.spinner("Processing your query..."):
            try:
                response = model.get_physiotherapy_assistant_response(prompt)
                with st.chat_message("assistant"):
                    st.markdown(response)
                st.session_state.messages.append({"role": "assistant", "content": response})
                logger.log("info", f"Assistant response: {response}")
            except Exception as e:
                st.error(f"An error occurred while processing your query: {str(e)}")
                logger.log("error", f"Error processing user query: {str(e)}")
    else:
        # Off-topic prompt: respond with a canned redirect instead of the LLM.
        response = "Please ask me anything related to physiotherapy"
        with st.chat_message("assistant"):
            st.markdown(response)
        st.session_state.messages.append({"role": "assistant", "content": response})
|
src/streamlit_app/model.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from groq import Groq
|
| 2 |
+
|
| 3 |
+
import os  # needed for the environment lookup below

# SECURITY FIX: the API key was previously hardcoded here (and therefore
# leaked in version control) — that key should be revoked. The Groq SDK also
# falls back to the GROQ_API_KEY environment variable when api_key is None.
client = Groq(
    api_key=os.environ.get("GROQ_API_KEY"),
)
|
| 6 |
+
|
| 7 |
+
def get_physiotherapy_assistant_response(prompt : str):
    """Answer a physiotherapy question via the Groq chat-completions API.

    Returns the model's reply, a request-for-detail message when the prompt
    is empty or under five characters, or an error string if the API call
    fails (this function never raises).
    """
    try:
        # Reject empty / too-short prompts before spending an API call.
        if not prompt or len(prompt.strip()) < 5:
            return "Please provide more details about your physiotherapy question. I need more context to assist you effectively."

        system_message = """You are a helpful physiotherapy assistant, trained to provide useful information about exercises, recovery, and treatments.
        You can help users with various physical therapy-related queries."""

        conversation = [
            {"role": "system", "content": system_message},
            {"role": "user", "content": prompt},
        ]
        chat_completion = client.chat.completions.create(
            messages=conversation,
            model="llama-3.3-70b-versatile",
        )
        return chat_completion.choices[0].message.content
    except Exception as e:
        return f"An error occurred while processing your request: {str(e)}"
|
src/utils/__pycache__/logger.cpython-312.pyc
ADDED
|
Binary file (1.21 kB). View file
|
|
|
src/utils/logger.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
|
| 3 |
+
# Global logging setup: INFO and above, timestamped to the minute.
# NOTE(review): basicConfig at import time configures the process-wide root
# logger; any app importing this module inherits this format and level.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M')
|
| 7 |
+
|
| 8 |
+
def log(log_type, message):
    """Route *message* to the stdlib root logger at the level named by *log_type*.

    Args:
        log_type: case-insensitive level name ("info", "debug", "warning",
            "critical", "error"). Unrecognized names are silently ignored.
        message: the text to log.

    Returns:
        None on success (mirroring the logging.* calls); an error string when
        *log_type* is not a string (e.g. None) and .lower() fails.
    """
    try:
        level_name = log_type.lower()
    except Exception as e:
        # Preserve the original contract: an invalid type yields a string
        # rather than raising.
        return f"Invalid log type.Error : {e}"

    handlers = {
        "info": logging.info,
        "debug": logging.debug,
        # Bug fix: "warning" previously dispatched to logging.error, so
        # warnings were recorded at ERROR severity.
        "warning": logging.warning,
        "critical": logging.critical,
        "error": logging.error,
    }
    emit = handlers.get(level_name)
    if emit is not None:
        return emit(message)
|