Ci-Dave committed on
Commit
1bb33c4
·
1 Parent(s): e08a709

Added Streamlit Authenticator for secure login/logout functionality

Browse files
Files changed (1) hide show
  1. app.py +71 -22
app.py CHANGED
@@ -1,82 +1,131 @@
1
- # Modified based on given code with corrections and improvements
2
  # - Fixed Gemini API integration by using google.generativeai instead of manual API requests
3
  # - Added support for multiple responses (though Gemini API requires handling this differently)
4
  # - Improved error handling for better reliability
5
  # - Refined creative mode and fact-checking integration into the prompt
6
  # - Optimized token handling for better text generation performance
 
 
 
 
 
 
 
 
 
 
7
 
8
- import streamlit as st
9
- import google.generativeai as genai
 
10
 
11
- # Gemini API key
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  api_key = st.secrets["gemini"]["api_key"]
 
 
13
  genai.configure(api_key=api_key)
14
 
15
- # Initialize Model
16
  MODEL_ID = "gemini-1.5-flash"
 
 
17
  if "model" not in st.session_state:
18
  st.session_state.model = genai.GenerativeModel(MODEL_ID)
19
 
 
20
  model = st.session_state.model
21
 
22
- # Page Title
23
  st.title("LemmaTeks: AI-Powered Text Generator")
24
 
25
- # Sidebar for Settings
26
  with st.sidebar:
27
  st.header("Configuration")
28
 
29
- # Dropdown for Output Format
30
  output_format = st.selectbox("Choose Output Format:", ["Story", "Poem", "Article", "Code"])
31
 
32
- # Dropdown for Tone/Style
33
  tone_style = st.selectbox("Select Tone/Style:", ["Formal", "Informal", "Humorous", "Technical"])
34
 
35
- # Add language selection
36
  language = st.selectbox("Select Language:", ["English", "Tagalog"])
37
 
38
- # Sliders for Text Length and Creativity
39
  text_length = st.slider("Text Length (words):", min_value=50, max_value=1000, step=50)
 
 
40
  creativity_level = st.slider("Creativity Level:", min_value=0.0, max_value=1.0, step=0.1)
41
 
42
- # Number of Responses
43
  num_responses = st.number_input("Number of Responses:", min_value=1, max_value=5, step=1)
44
 
45
- # Checkboxes for Features
46
  creative_mode = st.checkbox("Enable Creative Mode")
 
 
47
  fact_checking = st.checkbox("Enable Fact-Checking")
48
 
49
- # Text Input Field
50
  user_prompt = st.text_area("Enter Your Prompt Here:")
51
 
52
- # Modify prompt based on settings
53
  if creative_mode:
54
  user_prompt += " Optimize the creativity of your response."
55
  if fact_checking:
56
  user_prompt += " Support your answer with evidence."
57
- # Modify prompt based on language selection
 
58
  if language != "English":
59
  user_prompt = f"Write in {language}: {user_prompt}"
60
  if language != "Tagalog":
61
  user_prompt = f"Sumulat sa {language}: {user_prompt}"
62
 
63
- # Submit Button
64
  if st.button("Generate"):
 
65
  if user_prompt.strip() == "":
66
  st.warning("Please enter a prompt before generating!")
67
  else:
68
  st.write("Generating responses...")
69
-
70
  try:
 
71
  response = model.generate_content(
72
  f"Format: {output_format}\nTone: {tone_style}\nPrompt: {user_prompt}",
73
  generation_config=genai.GenerationConfig(
74
- max_output_tokens=text_length * 2, # Convert words to token estimate
75
- temperature=creativity_level
76
  )
77
  )
78
-
 
79
  st.markdown(response.text)
80
-
81
  except Exception as e:
 
82
  st.error(f"An unexpected error occurred: {e}")
 
 
1
+ # - Modified based on given code with corrections and improvements
2
  # - Fixed Gemini API integration by using google.generativeai instead of manual API requests
3
  # - Added support for multiple responses (though Gemini API requires handling this differently)
4
  # - Improved error handling for better reliability
5
  # - Refined creative mode and fact-checking integration into the prompt
6
  # - Optimized token handling for better text generation performance
7
+ # - Added support for multiple languages (English and Tagalog)
8
+ # - Added Streamlit Authenticator for secure login/logout functionality
9
+ # - Added Streamlit Secrets for secure API key storage
10
+ # - Added Streamlit Session State for persistent model initialization
11
+ # - Added Streamlit Sidebar for configuration settings
12
+ # - Added Streamlit Warnings and Errors for better user feedback
13
+ # - Added Streamlit Markdown for better text formatting
14
+ # - Added Streamlit Title and Header for better UI design
15
+ # - Added Streamlit Selectbox, Slider, Number Input, Text Area, Button, and Checkbox for user input
16
+ # - Added Streamlit Success for successful login feedback message
17
 
18
+ import streamlit as st # Import Streamlit for building the web app
19
+ import google.generativeai as genai # Import Google Generative AI SDK for text generation
20
+ import streamlit_authenticator as stauth # Import authentication module for user login
21
 
22
+ # Authenticate Streamlit app
23
+ users = {
24
+ "admin": stauth.Hasher(["securepassword"]).generate()[0] # Hash the password securely for admin login
25
+ }
26
+
27
+ # Create an authentication object
28
+ authenticator = stauth.Authenticate(
29
+ {"admin": users["admin"]}, "auth_cookie", "abcdef", cookie_expiry_days=1 # Set authentication details
30
+ )
31
+
32
+ # Login form
33
+ name, authentication_status, username = authenticator.login("Login", "main")
34
+
35
+ # If authentication fails, stop execution
36
+ if not authentication_status:
37
+ st.warning("Please enter valid credentials to access the app.")
38
+ st.stop()
39
+
40
+ # Display welcome message and logout button if authentication is successful
41
+ if authentication_status:
42
+ st.success(f"Welcome, {name}!")
43
+ authenticator.logout("Logout", "sidebar")
44
+
45
+ # Load Gemini API key from Streamlit Secrets configuration
46
  api_key = st.secrets["gemini"]["api_key"]
47
+
48
+ # Configure the generative AI model with API key
49
  genai.configure(api_key=api_key)
50
 
51
+ # Define the model ID for the AI-powered text generation
52
  MODEL_ID = "gemini-1.5-flash"
53
+
54
+ # Initialize the model if it is not already in session state
55
  if "model" not in st.session_state:
56
  st.session_state.model = genai.GenerativeModel(MODEL_ID)
57
 
58
+ # Retrieve the AI model instance
59
  model = st.session_state.model
60
 
61
+ # Page title
62
  st.title("LemmaTeks: AI-Powered Text Generator")
63
 
64
+ # Sidebar for settings
65
  with st.sidebar:
66
  st.header("Configuration")
67
 
68
+ # Dropdown menu for selecting output format (e.g., Story, Poem, Article, Code)
69
  output_format = st.selectbox("Choose Output Format:", ["Story", "Poem", "Article", "Code"])
70
 
71
+ # Dropdown menu for selecting tone/style of generated text
72
  tone_style = st.selectbox("Select Tone/Style:", ["Formal", "Informal", "Humorous", "Technical"])
73
 
74
+ # Dropdown menu for selecting the language of the generated text
75
  language = st.selectbox("Select Language:", ["English", "Tagalog"])
76
 
77
+ # Slider for specifying text length in words
78
  text_length = st.slider("Text Length (words):", min_value=50, max_value=1000, step=50)
79
+
80
+ # Slider for adjusting creativity level of AI-generated response
81
  creativity_level = st.slider("Creativity Level:", min_value=0.0, max_value=1.0, step=0.1)
82
 
83
+ # Input box to set the number of responses to generate
84
  num_responses = st.number_input("Number of Responses:", min_value=1, max_value=5, step=1)
85
 
86
+ # Checkbox to enable creative mode, which enhances creativity in AI responses
87
  creative_mode = st.checkbox("Enable Creative Mode")
88
+
89
+ # Checkbox to enable fact-checking, ensuring responses are supported by evidence
90
  fact_checking = st.checkbox("Enable Fact-Checking")
91
 
92
+ # Text input field for user prompt
93
  user_prompt = st.text_area("Enter Your Prompt Here:")
94
 
95
+ # Modify the prompt based on selected settings
96
  if creative_mode:
97
  user_prompt += " Optimize the creativity of your response."
98
  if fact_checking:
99
  user_prompt += " Support your answer with evidence."
100
+
101
+ # Adjust prompt for language selection
102
  if language != "English":
103
  user_prompt = f"Write in {language}: {user_prompt}"
104
  if language != "Tagalog":
105
  user_prompt = f"Sumulat sa {language}: {user_prompt}"
106
 
107
+ # Submit button to generate AI-powered text
108
  if st.button("Generate"):
109
+ # Check if prompt is empty
110
  if user_prompt.strip() == "":
111
  st.warning("Please enter a prompt before generating!")
112
  else:
113
  st.write("Generating responses...")
114
+
115
  try:
116
+ # Generate AI content based on user settings
117
  response = model.generate_content(
118
  f"Format: {output_format}\nTone: {tone_style}\nPrompt: {user_prompt}",
119
  generation_config=genai.GenerationConfig(
120
+ max_output_tokens=text_length * 2, # Convert word count to token estimate
121
+ temperature=creativity_level # Control randomness of response
122
  )
123
  )
124
+
125
+ # Display generated text
126
  st.markdown(response.text)
127
+
128
  except Exception as e:
129
+ # Handle errors gracefully
130
  st.error(f"An unexpected error occurred: {e}")
131
+