Atreyu4EVR committed on
Commit
6d0e9f5
·
verified ·
1 Parent(s): 2acdf87

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -21
app.py CHANGED
@@ -1,8 +1,11 @@
1
  import streamlit as st
2
  import os
 
3
  from openai import OpenAI
4
- import random
 
5
  from dotenv import load_dotenv
 
6
  from huggingface_hub import InferenceClient
7
 
8
  # Load environment variables
@@ -12,12 +15,13 @@ load_dotenv()
12
  MAX_TOKENS = 4000
13
  DEFAULT_TEMPERATURE = 0.5
14
 
15
- # Initialize the client
16
- client = OpenAI(
17
- base_url="https://api-inference.huggingface.co/v1",
18
- api_key=os.environ.get('API_KEY') # Replace with your token
19
- )
20
 
 
 
 
 
 
21
  # Create supported models
22
  model_links = {
23
  "Meta-Llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
@@ -26,6 +30,7 @@ model_links = {
26
  "Falcon-7b-Instruct": "tiiuae/falcon-7b-instruct",
27
  }
28
 
 
29
  # Random dog images for error message
30
  random_dog_images = ["broken_llama3.jpeg"]
31
 
@@ -37,11 +42,12 @@ def reset_conversation():
37
  st.session_state.messages = []
38
  return None
39
 
40
- st.sidebar.button('Reset Chat', on_click=reset_conversation) # Reset button
41
-
42
  def main():
43
  st.header('Multi-Models')
44
 
 
45
  # Sidebar for model selection and temperature
46
  selected_model = st.sidebar.selectbox("Select Model", list(model_links.keys()))
47
  temperature = st.sidebar.slider('Select a temperature value', 0.0, 1.0, DEFAULT_TEMPERATURE)
@@ -51,6 +57,7 @@ def main():
51
 
52
  if st.session_state.prev_option != selected_model:
53
  st.session_state.messages = []
 
54
  st.session_state.prev_option = selected_model
55
  reset_conversation()
56
 
@@ -60,11 +67,14 @@ def main():
60
  st.sidebar.write(f"You're now chatting with **{selected_model}**")
61
  st.sidebar.markdown("*Generated content may be inaccurate or false.*")
62
 
63
- # Add a placeholder for chat messages
64
- if "messages" in st.session_state:
65
- for message in st.session_state.messages:
66
- with st.chat_message(message["role"]):
67
- st.markdown(message["content"])
 
 
 
68
 
69
  # Chat input and response
70
  if prompt := st.chat_input("Type message here..."):
@@ -78,14 +88,32 @@ def process_user_input(client, prompt, selected_model, temperature):
78
 
79
  # Generate and display assistant response
80
  with st.chat_message("assistant"):
81
- response = """😵‍💫 Looks like someone unplugged something!
82
- \n Either the model space is being updated or something is down."""
83
- st.write(response)
84
- random_dog_pick = random.choice(random_dog_images)
85
- st.image(random_dog_pick)
86
- st.write("This was the error message:")
87
- # Replace with the actual error handling
88
- st.write("Error details would go here.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89
 
90
  if __name__ == "__main__":
91
  main()
 
1
  import streamlit as st
2
  import os
3
+ import torch
4
  from openai import OpenAI
5
+ import numpy as np
6
+ import sys
7
  from dotenv import load_dotenv
8
+ import random
9
  from huggingface_hub import InferenceClient
10
 
11
  # Load environment variables
 
15
  MAX_TOKENS = 4000
16
  DEFAULT_TEMPERATURE = 0.5
17
 
18
+ # initialize the client
 
 
 
 
19
 
20
+ # Shared Hugging Face Inference API client used by main() for streamed
+ # chat completions (client.chat.completions.create).
+ # NOTE(review): assumes the API_KEY env var is set (e.g. via load_dotenv()
+ # earlier in this file) — if it is missing, api_key is None and requests
+ # will fail with an auth error at call time; confirm deployment config.
+ client = InferenceClient(
+     base_url="https://api-inference.huggingface.co/v1",
+     api_key=os.environ.get('API_KEY')  # Replace with your token
+ )
24
+
25
  # Create supported models
26
  model_links = {
27
  "Meta-Llama-3.1-8B": "meta-llama/Meta-Llama-3.1-8B-Instruct",
 
30
  "Falcon-7b-Instruct": "tiiuae/falcon-7b-instruct",
31
  }
32
 
33
+
34
  # Random dog images for error message
35
  random_dog_images = ["broken_llama3.jpeg"]
36
 
 
42
  st.session_state.messages = []
43
  return None
44
 
45
+ st.sidebar.button('Reset Chat', on_click=reset_conversation) #Reset button
46
+
47
  def main():
48
  st.header('Multi-Models')
49
 
50
+
51
  # Sidebar for model selection and temperature
52
  selected_model = st.sidebar.selectbox("Select Model", list(model_links.keys()))
53
  temperature = st.sidebar.slider('Select a temperature value', 0.0, 1.0, DEFAULT_TEMPERATURE)
 
57
 
58
  if st.session_state.prev_option != selected_model:
59
  st.session_state.messages = []
60
+ # st.write(f"Changed to {selected_model}")
61
  st.session_state.prev_option = selected_model
62
  reset_conversation()
63
 
 
67
  st.sidebar.write(f"You're now chatting with **{selected_model}**")
68
  st.sidebar.markdown("*Generated content may be inaccurate or false.*")
69
 
70
+ # Initialize chat history
71
+ if "messages" not in st.session_state:
72
+ st.session_state.messages = []
73
+
74
+ # Display chat messages from history on app rerun
75
+ for message in st.session_state.messages:
76
+ with st.chat_message(message["role"]):
77
+ st.markdown(message["content"])
78
 
79
  # Chat input and response
80
  if prompt := st.chat_input("Type message here..."):
 
88
 
89
  # Generate and display assistant response
90
  with st.chat_message("assistant"):
91
+ try:
92
+ stream = client.chat.completions.create(
93
+ model=model_links[selected_model],
94
+ messages=[
95
+ {"role": m["role"], "content": m["content"]}
96
+ for m in st.session_state.messages
97
+ ],
98
+ temperature=temperature,
99
+ stream=True,
100
+ max_tokens=MAX_TOKENS,
101
+ )
102
+ response = st.write_stream(stream)
103
+ except Exception as e:
104
+ handle_error(e)
105
+ return
106
+
107
+ st.session_state.messages.append({"role": "assistant", "content": response})
108
+
109
+ def handle_error(error):
+     """Show a friendly failure notice, a placeholder image, and the raw error.
+
+     Called from the chat response path when the streaming completion
+     raises; writes directly into the current Streamlit container.
+     """
+     notice = """😵‍💫 Looks like someone unplugged something!
+     \n Either the model space is being updated or something is down."""
+     st.write(notice)
+     # Pick one of the module-level fallback images to display.
+     fallback_image = random.choice(random_dog_images)
+     st.image(fallback_image)
+     st.write("This was the error message:")
+     st.write(str(error))
117
 
118
  if __name__ == "__main__":
119
  main()