# LemmaTeks: AI-powered text generator built with Streamlit and the Gemini API.
# - Modified based on given code with corrections and improvements
# - Fixed Gemini API integration by using google.generativeai instead of manual API requests
# - Added support for multiple responses (though Gemini API requires handling this differently)
# - Improved error handling for better reliability
# - Refined creative mode and fact-checking integration into the prompt
# - Optimized token handling for better text generation performance
# - Added support for multiple languages (English and Tagalog)
# - Added Streamlit Secrets for secure API key storage
# - Added Streamlit Session State for persistent model initialization
# - Added Streamlit Sidebar for configuration settings
# - Added Streamlit Warnings and Errors for better user feedback
# - Added Streamlit Markdown for better text formatting
# - Added Streamlit Title and Header for better UI design
# - Added Streamlit Selectbox, Slider, Number Input, Text Area, Button, and Checkbox for user input
import streamlit as st  # Streamlit builds the web UI
import google.generativeai as genai  # Google Generative AI SDK for Gemini access

# Pull the Gemini API key from Streamlit Secrets so the credential never
# lives in source control.
api_key = st.secrets["gemini"]["api_key"]
genai.configure(api_key=api_key)

# Single model identifier used for every generation in this app.
MODEL_ID = "gemini-1.5-flash"

# Streamlit reruns the whole script on each interaction; keep the model
# object in session state so it is constructed only once per session.
if "model" not in st.session_state:
    st.session_state.model = genai.GenerativeModel(MODEL_ID)
model = st.session_state.model
# Main page title.
st.title("LemmaTeks: AI-Powered Text Generator")

# All generation settings live in the sidebar.
with st.sidebar:
    st.header("Configuration")
    # What kind of text to produce.
    output_format = st.selectbox("Choose Output Format:", ["Story", "Poem", "Article", "Code"])
    # Voice/register of the generated text.
    tone_style = st.selectbox("Select Tone/Style:", ["Formal", "Informal", "Humorous", "Technical"])
    # Target output language.
    language = st.selectbox("Select Language:", ["English", "Tagalog"])
    # Desired length of the output, in words.
    text_length = st.slider("Text Length (words):", min_value=50, max_value=1000, step=50)
    # Sampling temperature: higher means more creative/random output.
    creativity_level = st.slider("Creativity Level:", min_value=0.0, max_value=1.0, step=0.1)
    # How many alternative responses the user wants.
    num_responses = st.number_input("Number of Responses:", min_value=1, max_value=5, step=1)
    # Optional prompt augmentations.
    creative_mode = st.checkbox("Enable Creative Mode")
    fact_checking = st.checkbox("Enable Fact-Checking")

# Free-form prompt entered by the user.
user_prompt = st.text_area("Enter Your Prompt Here:")
# Augment the prompt according to the selected toggles.
if creative_mode:
    user_prompt += " Optimize the creativity of your response."
if fact_checking:
    user_prompt += " Support your answer with evidence."

# Prepend a language instruction, phrased in the target language.
# BUG FIX: the original used two independent `!=` checks, so an English
# prompt was prefixed "Sumulat sa English: ..." and a Tagalog prompt was
# prefixed "Write in Tagalog: ..." — the opposite of the intent. Use an
# equality check with an elif so each language gets its own instruction
# (English needs none).
if language == "Tagalog":
    user_prompt = f"Sumulat sa {language}: {user_prompt}"
elif language != "English":
    user_prompt = f"Write in {language}: {user_prompt}"
# Generate AI-powered text when the user clicks the button.
if st.button("Generate"):
    # Refuse to call the API with an empty prompt.
    if user_prompt.strip() == "":
        st.warning("Please enter a prompt before generating!")
    else:
        st.write("Generating responses...")
        # Shared settings for every response; built once outside the loop.
        generation_config = genai.GenerationConfig(
            max_output_tokens=text_length * 2,  # rough word-to-token estimate
            temperature=creativity_level,  # controls randomness of the output
        )
        full_prompt = f"Format: {output_format}\nTone: {tone_style}\nPrompt: {user_prompt}"
        # FIX: the original ignored `num_responses` and always produced a
        # single response; generate each requested response in turn.
        for i in range(int(num_responses)):
            try:
                response = model.generate_content(
                    full_prompt,
                    generation_config=generation_config,
                )
                # Label responses only when more than one was requested.
                if num_responses > 1:
                    st.subheader(f"Response {i + 1}")
                st.markdown(response.text)
            except Exception as e:
                # Surface the failure but let remaining responses proceed.
                st.error(f"An unexpected error occurred: {e}")