Update app.py
app.py
CHANGED
@@ -4,6 +4,9 @@ import torch
 from groq import Groq
 from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 
+# ✅ Ensure set_page_config() is the first Streamlit command
+st.set_page_config(page_title="AI Study Assistant", page_icon="🤖", layout="wide")
+
 # Set up the Groq API Key
 GROQ_API_KEY = "your_groq_api_key_here" # Replace with your actual key
 os.environ["GROQ_API_KEY"] = GROQ_API_KEY
@@ -11,37 +14,46 @@ os.environ["GROQ_API_KEY"] = GROQ_API_KEY
 # Initialize the Groq client
 client = Groq(api_key=GROQ_API_KEY)
 
-#
+# ✅ Ensure Accelerate is installed
+try:
+    import accelerate  # noqa: F401
+except ImportError:
+    st.error("⚠️ `accelerate` library is required. Install it with: `pip install accelerate`")
+
+# ✅ Initialize Hugging Face DeepSeek R1 model correctly
 MODEL_NAME = "deepseek-ai/DeepSeek-R1"
 
 try:
     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
-    model = AutoModelForCausalLM.from_pretrained(
+    model = AutoModelForCausalLM.from_pretrained(
+        MODEL_NAME,
+        trust_remote_code=True,
+        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+        device_map="auto" if torch.cuda.is_available() else None
+    )
 
     def generate_response_hf(user_message):
-        inputs = tokenizer(user_message, return_tensors="pt").to(
+        inputs = tokenizer(user_message, return_tensors="pt").to(model.device)
         outputs = model.generate(**inputs, max_length=200)
         return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 except Exception as e:
-    st.error(f"Error loading DeepSeek-R1: {str(e)}")
-    generate_response_hf = lambda x: "Error: Model not loaded."
+    st.error(f"❌ Error loading DeepSeek-R1: {str(e)}")
+    generate_response_hf = lambda x: "⚠️ Error: Model not loaded."
 
 # Streamlit UI setup
-st.set_page_config(page_title="AI Study Assistant", page_icon="🤖", layout="wide")
 st.title("📚 Subject-specific AI Chatbot")
 st.write("Hello! I'm your AI Study Assistant. You can ask me any questions related to your subjects, and I'll try to help.")
 
-#
+# Sidebar settings
 st.sidebar.header("⚙️ Settings")
-st.sidebar.write("Customize your chatbot experience!")
 chat_model = st.sidebar.radio("Choose AI Model:", ["Groq API", "DeepSeek R1 (Hugging Face)"])
 
-# Initialize session state for
+# Initialize session state for conversation
 if 'conversation_history' not in st.session_state:
     st.session_state.conversation_history = []
 
-#
+# Subjects list
 subjects = ["Chemistry", "Computer", "English", "Islamiat", "Mathematics", "Physics", "Urdu"]
 
 def generate_chatbot_response(user_message):
@@ -61,7 +73,7 @@ def generate_chatbot_response(user_message):
     else:
         return generate_response_hf(prompt)
 
-#
+# Chat input
 st.markdown("### 💬 Chat with me")
 user_input = st.chat_input("Ask me a subject-related question:")
 
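A note on the first change: Streamlit requires st.set_page_config() to be the first Streamlit command the script executes, and calling any other st.* function before it raises a StreamlitAPIException. A minimal sketch of the required ordering:

import streamlit as st

# set_page_config() must precede every other st.* call in the script;
# running st.title() or st.error() first raises StreamlitAPIException.
st.set_page_config(page_title="AI Study Assistant", page_icon="🤖", layout="wide")

st.title("📚 Subject-specific AI Chatbot")  # safe: runs after set_page_config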
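The placeholder API key is still committed in plain text. For a deployed app, a common Streamlit pattern is to load the key from st.secrets or the environment instead of the source file; a sketch under that assumption (the GROQ_API_KEY secret name is illustrative):

import os
import streamlit as st
from groq import Groq

# Illustrative only: read from .streamlit/secrets.toml when present,
# fall back to the environment; avoid committing a real key.
GROQ_API_KEY = st.secrets.get("GROQ_API_KEY", os.environ.get("GROQ_API_KEY", ""))
client = Groq(api_key=GROQ_API_KEY)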
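One caveat the diff leaves untouched in generate_response_hf: max_length=200 caps the prompt and the completion together, so a long question leaves little room for the answer. Transformers' max_new_tokens bounds only the generated text, which is usually the intent; a sketch of that variant, reusing the tokenizer and model loaded above:

def generate_response_hf(user_message):
    inputs = tokenizer(user_message, return_tensors="pt").to(model.device)
    # max_new_tokens caps only the completion; max_length also counts
    # the prompt tokens, which can truncate replies to long questions.
    outputs = model.generate(**inputs, max_new_tokens=200)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)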
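The body of generate_chatbot_response is mostly unchanged context and is elided from the diff. For orientation, a hypothetical sketch of what the Groq branch of such a function typically looks like with this client; the model id and message framing are assumptions, not taken from app.py:

def generate_response_groq(prompt):
    # Hypothetical: standard Groq chat-completions call; the model id is
    # an assumption and may differ from what app.py actually uses.
    completion = client.chat.completions.create(
        model="llama3-8b-8192",
        messages=[{"role": "user", "content": prompt}],
    )
    return completion.choices[0].message.content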