rukeshpaudel committed on
Commit
76e438b
·
1 Parent(s): 96bf1ae

chatbot running

Browse files
Files changed (1) hide show
  1. utils/chatbot.py +48 -46
utils/chatbot.py CHANGED
@@ -1,53 +1,55 @@
1
- from openai import OpenAI
2
  import gradio as gr
3
  import os
4
 
5
# Module-level OpenAI client; reads the key from the environment and
# raises KeyError at import time if OPENAI_API_KEY is unset.
client = OpenAI(
    api_key=os.environ["OPENAI_API_KEY"],
)
7
-
8
class SpecializedDoctor:
    """Chat-assistant persona for a single medical specialty.

    Prefixes the patient's query with the specialty name and the first
    prompt line loaded from utils/prompts.txt, then asks the OpenAI
    chat-completion API for a reply.
    """

    def __init__(self, specialty):
        # Human-readable specialty name, e.g. "Cardiology".
        self.specialty = specialty
        self.load_prompts()

    def load_prompts(self):
        """Read prompt lines from utils/prompts.txt into self.prompts.

        Falls back to an empty list (with a console warning) when the
        file is missing, so construction never raises.
        """
        try:
            # Plain string literal: the original used an f-string with
            # no placeholders.
            with open("utils/prompts.txt", "r") as f:
                self.prompts = f.read().splitlines()
        except FileNotFoundError:
            print(f"Error: Prompts file for '{self.specialty}' not found.")
            self.prompts = []

    def provide_medical_advice(self, patient_query):
        """Return the model's reply to patient_query for this specialty.

        Bug fix: the original indexed self.prompts[0] unconditionally,
        which raised IndexError whenever the prompts file was missing or
        empty (load_prompts deliberately leaves self.prompts == []).
        """
        preamble = self.prompts[0] if self.prompts else ""
        prompt = f"{self.specialty}: {patient_query}\n{preamble}"
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7,
            max_tokens=150,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0.4,
        )
        return response.choices[0].message.content.strip()
33
-
34
-
35
def chat_response(message, history):
    """Gradio chat handler: answer `message` via the Cardiology doctor.

    Bug fix: the original overwrote the incoming `history` with a fresh
    list containing an empty user message, discarding the conversation
    the UI passed in (and making its own isinstance check dead code).
    The caller's history is now extended instead of replaced.

    Returns:
        tuple: ([assistant message dict], updated history list).
    """
    # Single hard-coded specialty for now; extend here for more doctors.
    doctor = SpecializedDoctor("Cardiology")
    doctor_response = doctor.provide_medical_advice(message)

    # Normalise history into a list of message dicts without dropping it.
    if history is None:
        history = []
    elif isinstance(history, dict):
        history = [history]
    else:
        history = list(history)

    history.append({"role": "user", "content": message})
    history.append({"role": "assistant", "content": doctor_response})
    return [{"role": "assistant", "content": doctor_response}], history
47
-
48
 
49
# Build the chat UI around the cardiology handler, then start serving it.
iface = gr.ChatInterface(
    fn=chat_response,
    title="Cardiologist Assistant",
)

iface.launch()
 
1
+ from openai import OpenAI
2
  import gradio as gr
3
  import os
4
 
5
# Shared OpenAI client. os.environ.get() yields None when the key is
# unset, deferring any failure to the first API call.
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
9
+
10
def chat_response(prompt, history=None):
    """
    Provide a chatbot response based on the given prompt and history.

    Args:
        prompt (str): The user's input prompt.
        history (list, optional): Previous interactions as message dicts
            ({"role": ..., "content": ...}). A new list is created when
            omitted.

    Returns:
        tuple: (assistant_response (str), updated history (list)).
    """
    # Bug fix: the original used a mutable default (history=[]), which is
    # shared across calls — separate conversations bled into one another
    # and the list grew without bound.
    if history is None:
        history = []

    # Record the user's turn.
    history.append({"role": "user", "content": prompt})

    # Bug fix: send the whole conversation, not just the latest prompt —
    # the original tracked history but never showed it to the model,
    # contradicting its own docstring.
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=list(history),
        max_tokens=150,
        temperature=0.7,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0.6,
        stop=None,
        n=1,
    )
    assistant_response = response.choices[0].message.content

    # Record the assistant's turn.
    history.append({"role": "assistant", "content": assistant_response})

    return assistant_response, history
 
 
 
 
 
43
 
44
# Create the Gradio interface.
# Bug fix: chat_response returns a (reply, history) tuple, but the
# Interface declares a single "text" output — Gradio would receive two
# values for one component. An adapter unpacks just the reply text.
def _reply_only(prompt):
    """Return only the assistant's reply for single-output display."""
    reply, _history = chat_response(prompt)
    return reply

interface = gr.Interface(
    fn=_reply_only,
    inputs=gr.Textbox(lines=3, label="Enter your prompt:"),
    outputs="text",
    title="OpenAI Chatbot: Ask Me Anything!",
)

# Launch the interface
interface.launch()