arpita-23 commited on
Commit
5a76032
·
verified ·
1 Parent(s): 77692aa

Upload 6 files

Browse files
Files changed (6) hide show
  1. .env +4 -0
  2. .gitignore +2 -0
  3. README.md +2 -12
  4. app.py +68 -0
  5. pyvenv.cfg +5 -0
  6. requirements.txt +7 -0
.env ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # SECURITY: a real API key was committed here — revoke it and never commit .env
+ GOOGLE_API_KEY=YOUR_GOOGLE_API_KEY_HERE
2
+
3
+
4
+
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # Created by venv; see https://docs.python.org/3/library/venv.html
2
+ *
README.md CHANGED
@@ -1,12 +1,2 @@
1
- ---
2
- title: ML Galaxy
3
- emoji: ⚡
4
- colorFrom: yellow
5
- colorTo: pink
6
- sdk: streamlit
7
- sdk_version: 1.41.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
+ # gemini-pro-streamlit-chatbot
2
+ This repository builds a chatbot using Google's Gemini-Pro model with a Streamlit front end.
 
 
 
 
 
 
 
 
 
 
app.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import streamlit as st
from dotenv import load_dotenv
import google.generativeai as gen_ai

# Pull variables from a local .env file into the process environment.
load_dotenv()

# Page chrome must be configured before any other Streamlit call.
st.set_page_config(
    page_title="ML Galaxy!",
    page_icon=":brain:",  # favicon emoji
    layout="centered",  # page layout option
)

# Credential that load_dotenv() is expected to have made available.
GOOGLE_API_KEY = os.environ.get("GOOGLE_API_KEY")

# Bail out early with a visible error when no credential is present.
if not GOOGLE_API_KEY:
    st.error("API key not found! Please set the GOOGLE_API_KEY in your .env file.")
    st.stop()

# Set up the Gemini-Pro client; surface any failure in the UI and halt.
try:
    gen_ai.configure(api_key=GOOGLE_API_KEY)
    model = gen_ai.GenerativeModel("gemini-pro")
except Exception as e:
    st.error(f"Error initializing the Gemini-Pro model: {e}")
    st.stop()
32
def translate_role_for_streamlit(user_role):
    """Map a Gemini history role onto Streamlit's chat-role vocabulary.

    Gemini labels its own turns "model"; Streamlit's chat UI calls that
    role "assistant". Any other role (e.g. "user") passes through as-is.
    """
    if user_role == "model":
        return "assistant"
    return user_role
35
+
36
# Create the persistent Gemini chat object once per browser session;
# st.session_state survives Streamlit's per-interaction reruns.
if "chat_session" not in st.session_state:
    try:
        st.session_state.chat_session = model.start_chat(history=[])
    except Exception as e:
        st.error(f"Error initializing chat session: {e}")
        st.stop()

# Page heading.
st.title("🤖 ML Galaxy")

# Replay every prior turn so the transcript persists across reruns.
try:
    for message in st.session_state.chat_session.history:
        role = translate_role_for_streamlit(message.role)
        with st.chat_message(role):
            st.markdown(message.parts[0].text)
except Exception as e:
    st.error(f"Error displaying chat history: {e}")

# Read the next user turn, echo it, and relay it through Gemini.
user_prompt = st.chat_input("Ask Gemini-Pro...")
if user_prompt:
    # Echo the user's turn into the transcript.
    st.chat_message("user").markdown(user_prompt)

    try:
        # send_message() appends both turns to the session history.
        gemini_response = st.session_state.chat_session.send_message(user_prompt)
        with st.chat_message("assistant"):
            st.markdown(gemini_response.text)
    except Exception as e:
        st.error(f"Error processing your message: {e}")
pyvenv.cfg ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ home = C:\Python313
2
+ include-system-site-packages = false
3
+ version = 3.13.1
4
+ executable = C:\Python313\python.exe
5
+ command = C:\Python313\python.exe -m venv c:\ML_Notes_chatbot\.venv
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ python-dotenv==1.0.1
2
+ google-generativeai==0.3.2
3
+ streamlit==1.30.0
4
+ deep-translator