# Streamlit application: generates "key issues" for a user-supplied technical
# topic via an external API, then uses Gemini to turn a selected issue into a
# technical problematic (an interrogative research question).
| import streamlit as st | |
| import requests | |
| import pandas as pd | |
| import google.generativeai as genai | |
| import os # To potentially use environment variables for API key | |
# --- Configuration ---
# Base URL of the key-issue generation service. No trailing slash: the
# endpoint below joins with "/generate-key-issues", and a trailing slash
# would produce a malformed double-slash URL ("…space//generate-key-issues").
KEY_ISSUES_API_URL = "https://adrienbrdne-fastapi-kig.hf.space"
KEY_ISSUES_ENDPOINT = f"{KEY_ISSUES_API_URL}/generate-key-issues"
# Gemini model used to generate the problematic from a selected key issue.
GEMINI_MODEL_NAME = "gemini-2.5-flash-preview-04-17"
| # --- Helper Functions --- | |
def call_key_issues_api(query, timeout=60):
    """Call the key-issue generation API and return its parsed JSON payload.

    Args:
        query: Free-text technical topic/query sent as ``{"query": ...}``.
        timeout: Seconds to wait for the server before giving up. Without a
            timeout, ``requests.post`` can block the Streamlit script
            indefinitely if the remote Space is cold or unresponsive.

    Returns:
        The decoded JSON response (expected to contain a ``key_issues`` list),
        or ``None`` on any failure. Errors are surfaced to the user via
        ``st.error`` rather than raised, so callers only need a None check.
    """
    data = {"query": query}
    try:
        response = requests.post(KEY_ISSUES_ENDPOINT, json=data, timeout=timeout)
        response.raise_for_status()  # Raise HTTPError for bad responses (4xx or 5xx)
        return response.json()
    except requests.exceptions.RequestException as e:
        # Covers connection errors, timeouts, and HTTP error statuses.
        st.error(f"Error calling Key Issues API: {e}")
        return None
    except Exception as e:
        # Defensive catch-all (e.g. JSON decoding errors) so the UI degrades
        # gracefully instead of crashing the script run.
        st.error(f"An unexpected error occurred during Key Issues API call: {e}")
        return None
def call_gemini_api(api_key, title, description, technical_topic):
    """Generate a technical problematic for a key issue using Gemini.

    Args:
        api_key: Gemini API key; an empty value short-circuits with an error.
        title: Title of the selected key issue.
        description: Detailed description of the key issue.
        technical_topic: The original user query that produced the key issues.

    Returns:
        The generated problematic text, or ``None`` on failure (missing key,
        API error, or a safety-blocked prompt). Errors are reported to the
        user via ``st.error``.
    """
    if not api_key:
        st.error("Gemini API Key is missing. Please enter it above.")
        return None
    try:
        # Configure the Gemini client
        genai.configure(api_key=api_key)
        # Define the prompt using an f-string
        prompt = f"""I want you to create a technical problematic using a key issue composed of a title and a detailed description.
Here is the title of the key issue to deal with: <title>{title}</title>
And here is the associated description: <description>{description}</description>
This key issue is part of the following technical topic: <technical_topic>{technical_topic}</technical_topic>
The problematic must be in interrogative form.
As the output, I only want you to provide the problematic found, nothing else.
Here are examples of problematics that you could create, it shows the level of specificity that you should aim for:
Example 1: 'How can autonomous, policy-driven security decisions be achieved by distributed network elements in a telecommunication network without continuous central authority interaction, whilst ensuring overall network security objectives?'
Example 2: 'How can secure communication between network elements and the confidentiality of network-related information (network element identities, topology) be guaranteed in 6G networks against unauthorized disclosure?'
Example 3: 'How can secure access to and communication with network elements be achieved in a 6G network, considering the heterogeneous and dynamic nature of network elements and the evolving landscape of cyber threats?'
Example 4: 'How can telecommunication systems guarantee long-term security against quantum attacks, considering the complexity and heterogeneity of current and future network architectures and services?'
As far as possible, avoid using acronyms in the problematic.
Try to be about the same length as the examples if possible."""
        # Create the model instance
        model = genai.GenerativeModel(GEMINI_MODEL_NAME)
        # Generate content (plain text is the default for text prompts)
        response = model.generate_content(prompt)
        # Surface safety blocks explicitly: accessing response.text on a
        # blocked response raises an opaque ValueError otherwise.
        feedback = getattr(response, "prompt_feedback", None)
        if feedback is not None and feedback.block_reason:
            st.error(f"Content generation blocked: {feedback.block_reason}")
            return None
        return response.text
    except Exception as e:
        st.error(f"Error calling Gemini API: {e}")
        return None
# --- Streamlit Interface ---
st.set_page_config(page_title="Problematic Generator", layout="wide")
st.title("Generate Problematics from Query")

# --- Session State Initialization ---
# Seed every session-state key this app reads so later accesses never fail
# across Streamlit reruns.
_SESSION_DEFAULTS = {
    "key_issues_df": None,
    "original_query": "",
    "generated_problematic": None,
    "selected_index": 0,  # default to the first row of the issues table
}
for _state_key, _default in _SESSION_DEFAULTS.items():
    if _state_key not in st.session_state:
        st.session_state[_state_key] = _default

# --- API Key Input ---
st.sidebar.header("API Configuration")
# Prefer the GEMINI_API_KEY environment variable; the sidebar field lets the
# user override or supply it manually.
default_gemini_key = os.environ.get("GEMINI_API_KEY", "")
gemini_api_key = st.sidebar.text_input(
    "Enter your Gemini API Key:",
    type="password",
    value=default_gemini_key,  # pre-fill if found in the environment
    help="You can get your API key from Google AI Studio.",
)
# --- Step 1: Get Key Issues ---
st.header("Step 1: Generate Key Issues")
user_query_input = st.text_input(
    "Enter your technical topic/query:",
    placeholder="Example: deploying edge computing for real-time AI-driven traffic management systems in smart cities",
    key="query_input_key"  # Unique key for the input widget
)
if st.button("Generate Key Issues", key="generate_issues_button"):
    # Clear previous results when generating new issues, so stale data from a
    # prior run never leaks into the new one.
    st.session_state.key_issues_df = None
    st.session_state.generated_problematic = None
    st.session_state.original_query = ""
    st.session_state.selected_index = 0
    if user_query_input:
        st.session_state.original_query = user_query_input  # Store the query for the later Gemini call
        st.info(f"Sending query to Key Issues API: {KEY_ISSUES_ENDPOINT}")
        with st.spinner("Calling API to generate key issues..."):
            result = call_key_issues_api(user_query_input)
        if result:
            # Validate the expected payload shape: a non-empty 'key_issues' list.
            if 'key_issues' in result and isinstance(result['key_issues'], list) and result['key_issues']:
                st.success("Key Issues received successfully!")
                temp_df = pd.DataFrame(result['key_issues'])
                # Drop the server-side 'id' column if present; the table index
                # is used for selection instead.
                if 'id' in temp_df.columns:
                    temp_df.drop(columns=['id'], inplace=True)
                # Ensure 'title' and 'description' columns exist — Step 3
                # reads both when building the Gemini prompt.
                if 'title' in temp_df.columns and 'description' in temp_df.columns:
                    st.session_state.key_issues_df = temp_df
                else:
                    st.error("API response is missing 'title' or 'description' columns in 'key_issues'.")
                    st.json(result)  # Show response for debugging
            else:
                st.error("API response does not contain a valid 'key_issues' list.")
                st.json(result)  # Show response for debugging
    else:
        st.warning("Please enter a query before generating key issues.")
# --- Step 2: Display DataFrame and Select Issue ---
# Rendered only once Step 1 has stored a validated DataFrame in session state.
if st.session_state.key_issues_df is not None:
    st.subheader("Generated Key Issues:")
    st.dataframe(st.session_state.key_issues_df, use_container_width=True)
    st.header("Step 2: Select a Key Issue and Generate Problematic")
    max_index = len(st.session_state.key_issues_df) - 1
    if max_index >= 0:  # Check if DataFrame is not empty
        selected_index = st.number_input(
            f"Select the index of the key issue to use (0 to {max_index}):",
            min_value=0,
            max_value=max_index,
            value=st.session_state.selected_index,  # persist the choice across reruns
            step=1,
            key="index_selector"
        )
        st.session_state.selected_index = selected_index  # Update session state
        # --- Step 3: Call Gemini API ---
        if st.button("Generate Problematic for Selected Index", key="generate_problematic_button"):
            st.session_state.generated_problematic = None  # Clear previous problematic
            if not gemini_api_key:
                st.error("Please enter your Gemini API Key in the sidebar.")
            else:
                try:
                    # Retrieve selected row data by positional index (.loc works
                    # because the DataFrame keeps its default RangeIndex).
                    selected_row = st.session_state.key_issues_df.loc[st.session_state.selected_index]
                    title = selected_row['title']
                    description = selected_row['description']
                    technical_topic = st.session_state.original_query  # Use the stored original query
                    st.info(f"Generating problematic for index {st.session_state.selected_index} using Gemini...")
                    with st.spinner("Calling Gemini API..."):
                        problematic_text = call_gemini_api(gemini_api_key, title, description, technical_topic)
                    if problematic_text:
                        st.session_state.generated_problematic = problematic_text
                    # Error messages are handled within call_gemini_api
                except KeyError:
                    st.error(f"Could not find data for index {st.session_state.selected_index}. Please check the DataFrame.")
                except Exception as e:
                    st.error(f"An unexpected error occurred during problematic generation: {e}")
    else:
        st.warning("The generated key issues list is empty.")
# --- Step 4: Display Generated Problematic ---
# Show the result from the last successful Gemini call, if any.
problematic = st.session_state.generated_problematic
if problematic:
    st.subheader("Generated Problematic:")
    st.markdown(f"> {problematic}")  # render as a blockquote

# --- Initial Instructions ---
# Shown only on a fresh page: no issues generated yet and nothing typed.
if st.session_state.key_issues_df is None and not user_query_input:
    st.info("Enter a technical topic/query above and click 'Generate Key Issues' to start.")