Rhodham96 committed on
Commit
0f6bf6d
·
1 Parent(s): 7bcf7f0

change to gemini

Browse files
Files changed (1) hide show
  1. chat-streamlit-app.py +55 -97
chat-streamlit-app.py CHANGED
@@ -1,106 +1,64 @@
1
  import streamlit as st
2
- import logging
3
- import time
4
- from transformers import pipeline
5
- import re
6
- from huggingface_hub import login
7
- from dotenv import load_dotenv
8
- import os
9
 
10
-
11
# Emit INFO-level logs from the app.
logging.basicConfig(level=logging.INFO)

# Display name -> Hugging Face model id for the sidebar picker.
MODEL_DICT = {
    "DialoGPT-medium": "microsoft/DialoGPT-medium",
    "flan-t5-small": "google/flan-t5-small",
    "Meta-Llama-Guard-2-8B": "meta-llama/Meta-Llama-Guard-2-8B",
}

# Session state survives Streamlit reruns; create the keys once.
if 'messages' not in st.session_state:
    st.session_state.messages = []

if 'current_model' not in st.session_state:
    st.session_state.current_model = None
34
-
35
-
36
def load_generator(model_name):
    """Return a Hugging Face text-generation pipeline for *model_name*."""
    return pipeline(task="text-generation", model=model_name)
38
-
39
-
40
def stream_chat(generator, messages, max_context=5, preprompt="You are a helpful and friendly chatbot."):
    """Generate one assistant reply from the recent chat history.

    Args:
        generator: A transformers text-generation pipeline (must expose
            ``.tokenizer`` with ``eos_token`` / ``eos_token_id``).
        messages: Chat history as ``{"role": ..., "content": ...}`` dicts;
            the latest user message is expected to be last.
        max_context: Number of trailing messages included as context.
        preprompt: System-style instruction prepended to the prompt.

    Returns:
        The assistant reply with the prompt echo and EOS tokens stripped.

    Raises:
        Re-raises any exception from the pipeline after logging it.
    """
    try:
        eos = generator.tokenizer.eos_token
        history = ""
        for msg in messages[-max_context:]:
            if msg['role'] == 'user':
                history += f"User: {msg['content']}{eos}\n"
            elif msg['role'] == 'assistant':
                history += f"Assistant: {msg['content']}{eos}\n"

        # BUG FIX: the original built `history` but never put it in the prompt
        # (it used only messages[-1]), so the model saw no conversation
        # context. The history already ends with the latest user turn.
        prompt = f"{preprompt}\n{history}Assistant:"

        output = generator(prompt, max_length=150, do_sample=True,
                           temperature=0.7,
                           pad_token_id=generator.tokenizer.eos_token_id)
        response = output[0]['generated_text']
        # The pipeline echoes the prompt; keep only text after the final
        # "Assistant:" marker, then drop any EOS tokens.
        response = response.split("Assistant:")[-1].strip()
        return response.replace(eos, "").strip()
    except Exception as e:
        logging.error(f"Error during generation: {str(e)}")
        raise e
60
 
61
def main():
    """Streamlit entry point: pick a model, load it once, chat with it."""
    # NOTE(review): nesting reconstructed from a diff rendering whose
    # indentation was stripped; the chat section is placed inside the
    # `if prompt:` guard, the only reading that keeps `messages[-1]`
    # safe on a fresh session — confirm against the real file.
    st.title("Chat with Multiple LLMs")
    logging.info("App started")

    # Model picker lives in the sidebar.
    model_key = st.sidebar.selectbox("Choose a model", list(MODEL_DICT.keys()))
    model_id = MODEL_DICT[model_key]

    # Only (re)load the pipeline when the selection actually changes.
    if st.session_state.current_model != model_id:
        with st.spinner(f"Loading model {model_key}..."):
            st.session_state.generator = load_generator(model_id)
            st.session_state.current_model = model_id
            st.success(f"Model {model_key} loaded!")

    if prompt := st.chat_input("Your question"):
        st.session_state.messages.append({"role": "user", "content": prompt})
        logging.info(f"User input: {prompt}")

        # Replay the whole conversation so far.
        for msg in st.session_state.messages:
            with st.chat_message(msg["role"]):
                st.write(msg["content"])

        if st.session_state.messages[-1]["role"] == "user":
            with st.chat_message("assistant"):
                t0 = time.time()
                logging.info("Generating response")

                with st.spinner("Writing..."):
                    try:
                        reply = stream_chat(st.session_state.generator, st.session_state.messages)
                        elapsed = time.time() - t0
                        timed_reply = f"{reply}\n\n⏱ Duration: {elapsed:.2f} seconds"
                        st.session_state.messages.append({"role": "assistant", "content": timed_reply})
                        st.write(timed_reply)
                        logging.info(f"Response generated in {elapsed:.2f}s")

                    except Exception as e:
                        st.session_state.messages.append({"role": "assistant", "content": str(e)})
                        st.error("An error occurred while generating the response.")
                        logging.error(f"Error: {str(e)}")


if __name__ == "__main__":
    main()
 
1
  import streamlit as st
2
+ import google.generativeai as genai
 
 
 
 
 
 
3
 
4
# Ensure the chat-history container exists across Streamlit reruns.
st.session_state.setdefault("messages", [])
7
 
8
def create_sidebar():
    """Render the sidebar (title, API-key field, usage notes).

    Returns:
        The Google API key typed by the user (empty string until entered).
    """
    with st.sidebar:
        st.title("🤖 Gemini Chatbot")
        key = st.text_input(
            "Google API Key:",
            type="password",
            help="Get your API key from Google Cloud",
        )
        st.markdown("""
    ### What is this?
    A simple chatbot powered by the Gemini language model.

    ### How to use
    1. Enter your Google API key in the sidebar.
    2. Start chatting!
    """)
    return key
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
def main():
    """Run the Gemini chatbot app: configure the API, show history, chat."""
    st.set_page_config(page_title="Gemini Chatbot")  # must be the first st.* call
    api_key = create_sidebar()  # Get the API key from the sidebar
    st.title("💬 Gemini Chatbot")

    # Without a key there is nothing to do — prompt the user and bail out.
    if not api_key:
        st.warning("👆 Please enter your Google API key in the sidebar to start.")
        return

    # Initialize the Gemini model (fixed stale comment: the model used here
    # is gemini-2.0-flash, not gemini-pro).
    genai.configure(api_key=api_key)
    model = genai.GenerativeModel("gemini-2.0-flash")

    # Display chat history accumulated in session state.
    st.markdown("### 💭 Chat")
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # Get user input and generate a response.
    if prompt := st.chat_input("Say something..."):
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)

        with st.chat_message("assistant"):
            with st.spinner("Thinking..."):
                try:
                    # BUG FIX: the original passed only `prompt`, so the model
                    # forgot every earlier turn even though the full history is
                    # stored and rendered above. Send the whole conversation in
                    # Gemini's contents format (assistant turns use role
                    # "model"); the latest prompt is already in the list.
                    contents = [
                        {"role": "user" if m["role"] == "user" else "model",
                         "parts": [m["content"]]}
                        for m in st.session_state.messages
                    ]
                    response = model.generate_content(contents)
                    # NOTE(review): response.text can raise (e.g. blocked
                    # content) instead of being falsy; the except below
                    # covers that path.
                    if response.text:
                        st.markdown(response.text)
                        st.session_state.messages.append({"role": "assistant", "content": response.text})
                    else:
                        st.markdown("Sorry, I couldn't get a response.")  # error message
                        st.session_state.messages.append({"role": "assistant", "content": "Sorry, I couldn't get a response."})
                except Exception as e:
                    st.error(f"An error occurred: {e}")  # show the error to the user
                    st.session_state.messages.append({"role": "assistant", "content": f"An error occurred: {e}"})


if __name__ == "__main__":
    main()