File size: 1,537 Bytes
34056fb
db7dc1f
34056fb
bd742b0
34056fb
bd742b0
 
e760ec2
e011e3f
 
db7dc1f
 
 
 
 
 
bd742b0
db7dc1f
 
 
bd742b0
db7dc1f
bd742b0
db7dc1f
 
bd742b0
db7dc1f
 
e011e3f
34056fb
 
db7dc1f
 
 
 
 
 
e011e3f
 
34056fb
e011e3f
34056fb
82408e3
e011e3f
bd742b0
cadb75a
e011e3f
cadb75a
bd742b0
e011e3f
bd742b0
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import os

import openai
import requests
import streamlit as st
from transformers import pipeline

# Initialize the OpenAI API key.
# SECURITY: never hard-code secrets in source files — they end up in version
# control. Read the key from the environment instead (export OPENAI_API_KEY
# before launching the app).
openai.api_key = os.environ.get("OPENAI_API_KEY", "")

# Helper that forwards a question to OpenAI and extracts the answer text.
def get_chat_response(query):
    """Send *query* to the OpenAI chat endpoint and return the reply text.

    Parameters
    ----------
    query : str
        The user's question, forwarded verbatim as a single user message.

    Returns
    -------
    str
        The assistant's reply taken from the first completion choice.
    """
    chat_messages = [{"role": "user", "content": query}]
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",  # Choose the model you want to use
        messages=chat_messages,
    )
    first_choice = completion['choices'][0]
    return first_choice['message']['content']

class StudyAssistantChatbot:
    """Holds a local Hugging Face text-generation pipeline (distilgpt2)."""

    def __init__(self):
        """Load the text-generation pipeline, surfacing load failures in the UI.

        Raises:
            RuntimeError: if the model or its backend (TensorFlow/PyTorch)
                cannot be loaded.
        """
        # BUG FIX: a failing `pipeline(...)` call commonly raises ImportError
        # or OSError (missing TF/PyTorch backend, missing model files), not
        # only RuntimeError. The original bare `raise` re-raised those types,
        # which escaped the module-level `except RuntimeError` handler and
        # crashed the app. Normalize every load failure to RuntimeError so
        # the caller's handler always catches it and stops cleanly.
        try:
            self.qa_pipeline = pipeline("text-generation", model="distilgpt2")
        except (RuntimeError, ImportError, OSError) as e:
            st.error(f"Error loading the model: {e}")
            st.error("Please make sure either TensorFlow or PyTorch is installed.")
            raise RuntimeError(str(e)) from e

# ----- Streamlit page ---------------------------------------------------
st.title("Personalized Study Assistant Chatbot")

# Build the chatbot; halt page rendering if the model cannot be loaded.
try:
    chatbot = StudyAssistantChatbot()
except RuntimeError:
    st.stop()

# Main interaction: collect a question and answer it via OpenAI.
query = st.text_input("Ask your study-related question:")

if st.button("Get Tips and Resources"):
    if not query:
        # Guard clause: nothing was typed in yet.
        st.write("Please enter a question to get started!")
    else:
        st.write(get_chat_response(query))

# Sidebar with static information about the app.
st.sidebar.header("About")
st.sidebar.text("This is a personalized study assistant chatbot.")