# NOTE(review): removed Hugging Face Spaces file-viewer scrape residue
# (status lines, commit hashes, line-number gutter) — it was not valid Python.
# - Modified based on given code with corrections and improvements
# - Fixed Gemini API integration by using google.generativeai instead of manual API requests
# - Added support for multiple responses (though Gemini API requires handling this differently)
# - Improved error handling for better reliability
# - Refined creative mode and fact-checking integration into the prompt
# - Optimized token handling for better text generation performance
# - Added support for multiple languages (English and Tagalog)
# - Added Streamlit Secrets for secure API key storage
# - Added Streamlit Session State for persistent model initialization
# - Added Streamlit Sidebar for configuration settings
# - Added Streamlit Warnings and Errors for better user feedback
# - Added Streamlit Markdown for better text formatting
# - Added Streamlit Title and Header for better UI design
# - Added Streamlit Selectbox, Slider, Number Input, Text Area, Button, and Checkbox for user input
# Streamlit powers the web UI; google.generativeai is the official Gemini SDK.
import streamlit as st
import google.generativeai as genai

# Pull the Gemini API key from Streamlit's encrypted secrets store and hand it
# straight to the SDK — the key never appears in source control.
genai.configure(api_key=st.secrets["gemini"]["api_key"])

# Gemini model identifier used for every generation request in this app.
MODEL_ID = "gemini-1.5-flash"

# Streamlit re-executes the whole script on each interaction, so the model
# handle is cached in session state rather than rebuilt on every rerun.
if "model" not in st.session_state:
    st.session_state["model"] = genai.GenerativeModel(MODEL_ID)

# Convenience alias for the cached model instance.
model = st.session_state["model"]
# Page title shown at the top of the app.
st.title("LemmaTeks: AI-Powered Text Generator")

# Sidebar holds all configuration widgets.
# FIX: the original `with st.sidebar:` body was not indented, which is an
# IndentationError in Python — the widgets below are now properly nested.
with st.sidebar:
    st.header("Configuration")
    # Output format of the generated text.
    output_format = st.selectbox("Choose Output Format:", ["Story", "Poem", "Article", "Code"])
    # Tone/style applied to the generated text.
    tone_style = st.selectbox("Select Tone/Style:", ["Formal", "Informal", "Humorous", "Technical"])
    # Language of the generated text.
    language = st.selectbox("Select Language:", ["English", "Tagalog"])
    # Desired length in words (converted to a token budget at generation time).
    text_length = st.slider("Text Length (words):", min_value=50, max_value=1000, step=50)
    # Sampling temperature: 0.0 = deterministic, 1.0 = most random.
    creativity_level = st.slider("Creativity Level:", min_value=0.0, max_value=1.0, step=0.1)
    # How many independent completions to produce per click.
    num_responses = st.number_input("Number of Responses:", min_value=1, max_value=5, step=1)
    # Appends a creativity instruction to the prompt when enabled.
    creative_mode = st.checkbox("Enable Creative Mode")
    # Appends an evidence-request instruction to the prompt when enabled.
    fact_checking = st.checkbox("Enable Fact-Checking")

# Main-area free-text prompt (intentionally outside the sidebar).
user_prompt = st.text_area("Enter Your Prompt Here:")
# Augment the raw prompt according to the selected options.
if creative_mode:
    user_prompt += " Optimize the creativity of your response."
if fact_checking:
    user_prompt += " Support your answer with evidence."

# Language handling.
# BUG FIX: the original used two independent `!=` checks, so selecting
# English produced "Sumulat sa English: ..." (the Tagalog instruction) and
# selecting Tagalog produced the English-phrased "Write in Tagalog: ...".
# Prefix each language with its matching instruction; English needs none.
if language == "Tagalog":
    user_prompt = f"Sumulat sa {language}: {user_prompt}"
elif language != "English":
    user_prompt = f"Write in {language}: {user_prompt}"
# Generate button: validates the prompt, then produces the requested number
# of completions and renders each one as Markdown.
if st.button("Generate"):
    if not user_prompt.strip():
        # Empty or whitespace-only prompt — ask the user for input.
        st.warning("Please enter a prompt before generating!")
    else:
        st.write("Generating responses...")
        try:
            # FIX: `num_responses` was collected in the sidebar but never
            # used; issue one generation request per requested response.
            for i in range(int(num_responses)):
                response = model.generate_content(
                    f"Format: {output_format}\nTone: {tone_style}\nPrompt: {user_prompt}",
                    generation_config=genai.GenerationConfig(
                        # Rough words->tokens budget (~2 tokens per word).
                        max_output_tokens=text_length * 2,
                        # Higher temperature => more creative/random output.
                        temperature=creativity_level,
                    ),
                )
                # Label each completion only when more than one was asked for,
                # preserving the original single-response layout.
                if num_responses > 1:
                    st.markdown(f"**Response {i + 1}:**")
                st.markdown(response.text)
        except Exception as e:
            # Surface API/network failures to the user instead of crashing
            # the Streamlit script.
            st.error(f"An unexpected error occurred: {e}")