tarrasyed19472007 committed on
Commit
db7dc1f
·
verified ·
1 Parent(s): 9399bac

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -16
app.py CHANGED
@@ -1,45 +1,73 @@
1
  import streamlit as st
 
2
  import openai
 
 
3
 
4
  # Initialize OpenAI API key
5
  openai.api_key = "sk-...1-AA" # Replace with your actual OpenAI API key
6
 
7
  # Function to get responses from OpenAI
8
def get_chat_response(query):
    """Send *query* as a single user message to the OpenAI chat API.

    Returns the assistant's reply text, or None when the API call fails;
    the failure is surfaced in the Streamlit UI via st.error.
    """
    conversation = [{"role": "user", "content": query}]
    try:
        completion = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",  # Use the desired model from OpenAI
            messages=conversation,
        )
    except Exception as err:
        # Broad catch is deliberate: any API/network failure is reported
        # to the user instead of crashing the app.
        st.error(f"An error occurred: {err}")
        return None
    return completion.choices[0].message['content']
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
# Initialize Streamlit app
st.title("Personalized Study Assistant Chatbot")

# Input for user query
query = st.text_input("Ask your study-related question:")

# Generate response
if st.button("Get Tips and Resources"):
    if query:
        # Get response from OpenAI; get_chat_response returns None on
        # failure (it reports the error itself via st.error).
        response = get_chat_response(query)
        if response:
            st.write(response)
        else:
            st.write("There was an error generating the response. Please try again.")
    else:
        st.write("Please enter a question to get started!")

# Add a sidebar for additional information
# FIX: the original call was missing its closing parenthesis, which is a
# SyntaxError and prevents the whole script from running.
st.sidebar.header("About")
st.sidebar.text("This is a personalized study assistant chatbot.")
42
-
43
 
44
 
45
 
 
1
  import streamlit as st
2
+ from transformers import pipeline
3
  import openai
4
+ import requests
5
+ import torch
6
 
7
  # Initialize OpenAI API key
8
  openai.api_key = "sk-...1-AA" # Replace with your actual OpenAI API key
9
 
10
  # Function to get responses from OpenAI
11
def get_chat_response(query):
    """Send *query* as a single user message to the OpenAI chat API.

    Returns the assistant's reply text, or None if the API call fails.
    The caller relies on a falsy return to show its "Unable to get a
    response" fallback message, so failures must not propagate.

    NOTE(review): this uses the pre-1.0 `openai.ChatCompletion`
    interface — confirm the installed openai package version supports it.
    """
    try:
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",  # Choose the model you want to use
            messages=[
                {"role": "user", "content": query}
            ],
        )
        return response.choices[0].message['content']
    except Exception as e:
        # FIX: without this handler any API/network error crashes the
        # Streamlit script and the caller's error branch is unreachable.
        # Report the failure in the UI and signal it with None.
        st.error(f"An error occurred: {e}")
        return None
19
+
20
# Class for the local chatbot using Hugging Face Transformers
class StudyAssistantChatbot:
    """Wraps a small local text-generation model for offline study tips."""

    def __init__(self):
        # Load local model (ensure PyTorch is installed)
        try:
            self.qa_pipeline = pipeline("text-generation", model="distilgpt2")
        except RuntimeError as load_err:
            # Show the failure in the UI, then re-raise so the caller can
            # decide to halt the app.
            st.error(f"Error loading the model: {load_err}")
            st.error("Please make sure either TensorFlow or PyTorch is installed.")
            raise

    def generate_study_tips(self, query):
        """Return locally generated text continuing *query*."""
        outputs = self.qa_pipeline(query, max_length=50, num_return_sequences=1)
        first_candidate = outputs[0]
        return first_candidate['generated_text']
35
 
36
# Initialize Streamlit app
st.title("Personalized Study Assistant Chatbot")

# Create chatbot instance.
# StudyAssistantChatbot.__init__ re-raises RuntimeError after displaying
# the error, so st.stop() halts the script cleanly on model-load failure.
# NOTE(review): other load failures (e.g. a failed model download) may
# raise a different exception type and propagate uncaught — confirm.
try:
    chatbot = StudyAssistantChatbot()
except RuntimeError:
    st.stop()

# Input for user query
query = st.text_input("Ask your study-related question:")

if st.button("Get Tips and Resources"):
    if query:
        # Get response from OpenAI API; a falsy response is treated as a
        # failure and a fallback message is shown instead.
        response = get_chat_response(query)
        if response:
            st.subheader("OpenAI GPT Response:")
            st.write(response)
        else:
            st.write("Unable to get a response from OpenAI at the moment.")

        # Get study tips from the local model as well (runs regardless of
        # whether the OpenAI call succeeded).
        st.subheader("Study Tips from Local Model:")
        tips_response = chatbot.generate_study_tips(query)
        if tips_response:
            st.write(tips_response)
        else:
            st.write("Unable to generate study tips at the moment.")
    else:
        st.write("Please enter a question to get started!")

# Add a sidebar for additional information
st.sidebar.header("About")
st.sidebar.text("This is a personalized study assistant chatbot that provides responses from OpenAI GPT and study tips generated locally.")
 
71
 
72
 
73